/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

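/* Grow the PhysPageMap node array so it has room for at least @nodes more
 * entries, doubling the allocation as needed.
 */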
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

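/* Allocate a new page table node and return its index.  If @leaf, every
 * entry initially points at the unassigned section; otherwise every entry
 * is a nil pointer to the next level down.
 */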
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

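/* Map @nb target pages starting at page number @index to section number
 * @leaf in the dispatch map of @d.
 */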
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

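/* Walk the radix tree for @addr, skipping levels as encoded in each entry,
 * and return the MemoryRegionSection covering it, or the unassigned section
 * if no mapping exists.
 */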
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

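/* Return the CPUState whose cpu_index equals @index, or NULL if there is
 * no such CPU.
 */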
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

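/* Throw away any translated code containing the guest address @pc, so that
 * the next execution of that code takes the new breakpoint into account.
 */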
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

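/* Reset the per-CPU TLB dirty state for the RAM range [start, start + length),
 * which must lie within a single RAMBlock, so that subsequent guest writes
 * are trapped again.
 */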
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
1052
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001053static uint16_t phys_section_add(PhysPageMap *map,
1054 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001055{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001056 /* The physical section number is ORed with a page-aligned
1057 * pointer to produce the iotlb entries. Thus it should
1058 * never overflow into the page-aligned value.
1059 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001060 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001061
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001062 if (map->sections_nb == map->sections_nb_alloc) {
1063 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1064 map->sections = g_renew(MemoryRegionSection, map->sections,
1065 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001066 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001067 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001068 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001069 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001070}
1071
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001072static void phys_section_destroy(MemoryRegion *mr)
1073{
Don Slutz55b4e802015-11-30 17:11:04 -05001074 bool have_sub_page = mr->subpage;
1075
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001076 memory_region_unref(mr);
1077
Don Slutz55b4e802015-11-30 17:11:04 -05001078 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001079 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001080 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001081 g_free(subpage);
1082 }
1083}
1084
Paolo Bonzini60926662013-05-29 12:30:26 +02001085static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001086{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001087 while (map->sections_nb > 0) {
1088 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001089 phys_section_destroy(section->mr);
1090 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001091 g_free(map->sections);
1092 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001093}
1094
Avi Kivityac1970f2012-10-03 16:22:53 +02001095static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001096{
1097 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001098 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001099 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001100 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001101 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001102 MemoryRegionSection subsection = {
1103 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001104 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001105 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001106 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001107
Avi Kivityf3705d52012-03-08 16:16:34 +02001108 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001109
Avi Kivityf3705d52012-03-08 16:16:34 +02001110 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001111 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001112 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001113 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001114 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001115 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001116 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001117 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001118 }
1119 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001120 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001121 subpage_register(subpage, start, end,
1122 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001123}
1124
1125
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001126static void register_multipage(AddressSpaceDispatch *d,
1127 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001128{
Avi Kivitya8170e52012-10-23 12:30:10 +02001129 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001130 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001131 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1132 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001133
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001134 assert(num_pages);
1135 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001136}
1137
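/* Add a section to the address space dispatch under construction: split it
 * into an unaligned head, a page-aligned middle and an unaligned tail,
 * registering the partial pages as subpages and the rest as multipage. */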
Avi Kivityac1970f2012-10-03 16:22:53 +02001138static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001139{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001140 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001141 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001142 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001144
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001145 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1146 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1147 - now.offset_within_address_space;
1148
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001149 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001150 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001151 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001152 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001153 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001154 while (int128_ne(remain.size, now.size)) {
1155 remain.size = int128_sub(remain.size, now.size);
1156 remain.offset_within_address_space += int128_get64(now.size);
1157 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001158 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001159 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001160 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001161 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001162 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001163 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001164 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001165 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001166 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001167 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001168 }
1169}
1170
Sheng Yang62a27442010-01-26 19:21:16 +08001171void qemu_flush_coalesced_mmio_buffer(void)
1172{
1173 if (kvm_enabled())
1174 kvm_flush_coalesced_mmio_buffer();
1175}
1176
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001177void qemu_mutex_lock_ramlist(void)
1178{
1179 qemu_mutex_lock(&ram_list.mutex);
1180}
1181
1182void qemu_mutex_unlock_ramlist(void)
1183{
1184 qemu_mutex_unlock(&ram_list.mutex);
1185}
1186
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001187#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001188
1189#include <sys/vfs.h>
1190
1191#define HUGETLBFS_MAGIC 0x958458f6
1192
Hu Taofc7a5802014-09-09 13:28:01 +08001193static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001194{
1195 struct statfs fs;
1196 int ret;
1197
1198 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001199 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001200 } while (ret != 0 && errno == EINTR);
1201
1202 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001203 error_setg_errno(errp, errno, "failed to get page size of file %s",
1204 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001205 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001206 }
1207
Marcelo Tosattic9027602010-03-01 20:25:08 -03001208 return fs.f_bsize;
1209}
1210
Alex Williamson04b16652010-07-02 11:13:17 -06001211static void *file_ram_alloc(RAMBlock *block,
1212 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001213 const char *path,
1214 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001215{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001216 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001217 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001218 char *sanitized_name;
1219 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001220 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001221 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001222 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001223 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001224
Hu Taofc7a5802014-09-09 13:28:01 +08001225 hpagesize = gethugepagesize(path, &local_err);
1226 if (local_err) {
1227 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001228 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001229 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001230 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231
1232 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001233 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1234 "or larger than huge page size 0x%" PRIx64,
1235 memory, hpagesize);
1236 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001237 }
1238
1239 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001240 error_setg(errp,
1241 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001242 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001243 }
1244
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001245 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1246 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1247 sanitized_name = g_strdup(memory_region_name(block->mr));
1248 for (c = sanitized_name; *c != '\0'; c++) {
1249 if (*c == '/') {
1250 *c = '_';
1251 }
1252 }
1253
1254 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1255 sanitized_name);
1256 g_free(sanitized_name);
1257
1258 fd = mkstemp(filename);
1259 if (fd >= 0) {
1260 unlink(filename);
1261 }
1262 g_free(filename);
1263 } else {
1264 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001265 }
1266
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001268 error_setg_errno(errp, errno,
1269 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001270 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001271 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001272
Chen Hanxiao9284f312015-07-24 11:12:03 +08001273 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001274
1275 /*
1276 * ftruncate is not supported by hugetlbfs in older
1277 * hosts, so don't bother bailing out on errors.
1278 * If anything goes wrong with it under other filesystems,
1279 * mmap will fail.
1280 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001281 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001282 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001283 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001284
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001285 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001286 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001287 error_setg_errno(errp, errno,
1288 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001289 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001290 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001291 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001292
1293 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001294 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001295 }
1296
Alex Williamson04b16652010-07-02 11:13:17 -06001297 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001298 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001299
1300error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001301 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001302}
1303#endif
1304
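/* Return the start of the smallest gap between existing RAMBlocks that can
 * hold @size bytes of ram_addr_t space. */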
Mike Day0dc3f442013-09-05 14:41:35 -04001305/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001306static ram_addr_t find_ram_offset(ram_addr_t size)
1307{
Alex Williamson04b16652010-07-02 11:13:17 -06001308 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001309 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001310
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001311    assert(size != 0); /* it would hand out the same offset multiple times */
1312
Mike Day0dc3f442013-09-05 14:41:35 -04001313 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001314 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001315 }
Alex Williamson04b16652010-07-02 11:13:17 -06001316
Mike Day0dc3f442013-09-05 14:41:35 -04001317 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001318 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001319
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001320 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001321
Mike Day0dc3f442013-09-05 14:41:35 -04001322 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001323 if (next_block->offset >= end) {
1324 next = MIN(next, next_block->offset);
1325 }
1326 }
1327 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001328 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001329 mingap = next - end;
1330 }
1331 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001332
1333 if (offset == RAM_ADDR_MAX) {
1334 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1335 (uint64_t)size);
1336 abort();
1337 }
1338
Alex Williamson04b16652010-07-02 11:13:17 -06001339 return offset;
1340}
1341
Juan Quintela652d7ec2012-07-20 10:37:54 +02001342ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001343{
Alex Williamsond17b5282010-06-25 11:08:38 -06001344 RAMBlock *block;
1345 ram_addr_t last = 0;
1346
Mike Day0dc3f442013-09-05 14:41:35 -04001347 rcu_read_lock();
1348 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001349 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001350 }
Mike Day0dc3f442013-09-05 14:41:35 -04001351 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001352 return last;
1353}
1354
Jason Baronddb97f12012-08-02 15:44:16 -04001355static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1356{
1357 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001358
 1359 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001360 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001361 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1362 if (ret) {
1363 perror("qemu_madvise");
1364 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1365 "but dump_guest_core=off specified\n");
1366 }
1367 }
1368}
1369
Mike Day0dc3f442013-09-05 14:41:35 -04001370/* Called within an RCU critical section, or while the ramlist lock
1371 * is held.
1372 */
Hu Tao20cfe882014-04-02 15:13:26 +08001373static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001374{
Hu Tao20cfe882014-04-02 15:13:26 +08001375 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001376
Mike Day0dc3f442013-09-05 14:41:35 -04001377 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001378 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001379 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001380 }
1381 }
Hu Tao20cfe882014-04-02 15:13:26 +08001382
1383 return NULL;
1384}
1385
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001386const char *qemu_ram_get_idstr(RAMBlock *rb)
1387{
1388 return rb->idstr;
1389}
1390
Mike Dayae3a7042013-09-05 14:41:35 -04001391/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001392void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1393{
Mike Dayae3a7042013-09-05 14:41:35 -04001394 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001395
Mike Day0dc3f442013-09-05 14:41:35 -04001396 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001397 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001398 assert(new_block);
1399 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001400
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001401 if (dev) {
1402 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001403 if (id) {
1404 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001405 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001406 }
1407 }
1408 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1409
Mike Day0dc3f442013-09-05 14:41:35 -04001410 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001411 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001412 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1413 new_block->idstr);
1414 abort();
1415 }
1416 }
Mike Day0dc3f442013-09-05 14:41:35 -04001417 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001418}
1419
Mike Dayae3a7042013-09-05 14:41:35 -04001420/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001421void qemu_ram_unset_idstr(ram_addr_t addr)
1422{
Mike Dayae3a7042013-09-05 14:41:35 -04001423 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001424
Mike Dayae3a7042013-09-05 14:41:35 -04001425 /* FIXME: arch_init.c assumes that this is not called throughout
1426 * migration. Ignore the problem since hot-unplug during migration
1427 * does not work anyway.
1428 */
1429
Mike Day0dc3f442013-09-05 14:41:35 -04001430 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001431 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001432 if (block) {
1433 memset(block->idstr, 0, sizeof(block->idstr));
1434 }
Mike Day0dc3f442013-09-05 14:41:35 -04001435 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001436}
1437
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001438static int memory_try_enable_merging(void *addr, size_t len)
1439{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001440 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001441 /* disabled by the user */
1442 return 0;
1443 }
1444
1445 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1446}
1447
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001448/* Only legal before guest might have detected the memory size: e.g. on
1449 * incoming migration, or right after reset.
1450 *
 1451 * As the memory core doesn't know how memory is accessed, it is up to the
 1452 * resize callback to update device state and/or add assertions to detect
1453 * misuse, if necessary.
1454 */
1455int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1456{
1457 RAMBlock *block = find_ram_block(base);
1458
1459 assert(block);
1460
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001461 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001462
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001463 if (block->used_length == newsize) {
1464 return 0;
1465 }
1466
1467 if (!(block->flags & RAM_RESIZEABLE)) {
1468 error_setg_errno(errp, EINVAL,
1469 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1470 " in != 0x" RAM_ADDR_FMT, block->idstr,
1471 newsize, block->used_length);
1472 return -EINVAL;
1473 }
1474
1475 if (block->max_length < newsize) {
1476 error_setg_errno(errp, EINVAL,
1477 "Length too large: %s: 0x" RAM_ADDR_FMT
1478 " > 0x" RAM_ADDR_FMT, block->idstr,
1479 newsize, block->max_length);
1480 return -EINVAL;
1481 }
1482
1483 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1484 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001485 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1486 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001487 memory_region_set_size(block->mr, newsize);
1488 if (block->resized) {
1489 block->resized(block->idstr, newsize, block->host);
1490 }
1491 return 0;
1492}
1493
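/* Allocate host memory for @new_block if none was provided, insert the
 * block into the size-sorted RCU list, extend the dirty memory bitmaps when
 * the ram_addr_t space grows, and mark the new range dirty. */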
Hu Taoef701d72014-09-09 13:27:54 +08001494static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001495{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001496 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001497 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001498 ram_addr_t old_ram_size, new_ram_size;
1499
1500 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001501
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001502 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001503 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001504
1505 if (!new_block->host) {
1506 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001507 xen_ram_alloc(new_block->offset, new_block->max_length,
1508 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001509 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001510 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001511 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001512 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001513 error_setg_errno(errp, errno,
1514 "cannot set up guest memory '%s'",
1515 memory_region_name(new_block->mr));
1516 qemu_mutex_unlock_ramlist();
1517 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001518 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001519 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001520 }
1521 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001522
Li Zhijiandd631692015-07-02 20:18:06 +08001523 new_ram_size = MAX(old_ram_size,
1524 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1525 if (new_ram_size > old_ram_size) {
1526 migration_bitmap_extend(old_ram_size, new_ram_size);
1527 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001528 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1529 * QLIST (which has an RCU-friendly variant) does not have insertion at
1530 * tail, so save the last element in last_block.
1531 */
Mike Day0dc3f442013-09-05 14:41:35 -04001532 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001533 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001534 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001535 break;
1536 }
1537 }
1538 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001539 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001540 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001541 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001542 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001543 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001544 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001545 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001546
Mike Day0dc3f442013-09-05 14:41:35 -04001547 /* Write list before version */
1548 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001549 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001550 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001551
Juan Quintela2152f5c2013-10-08 13:52:02 +02001552 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1553
1554 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001555 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001556
1557 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001558 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1559 ram_list.dirty_memory[i] =
1560 bitmap_zero_extend(ram_list.dirty_memory[i],
1561 old_ram_size, new_ram_size);
1562 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001563 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001564 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001565 new_block->used_length,
1566 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001567
Paolo Bonzinia904c912015-01-21 16:18:35 +01001568 if (new_block->host) {
1569 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1570 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1571 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1572 if (kvm_enabled()) {
1573 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1574 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001575 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001576
1577 return new_block->offset;
1578}
1579
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001580#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001582 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001583 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001584{
1585 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001586 ram_addr_t addr;
1587 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001588
1589 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001590 error_setg(errp, "-mem-path not supported with Xen");
1591 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001592 }
1593
1594 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1595 /*
1596 * file_ram_alloc() needs to allocate just like
1597 * phys_mem_alloc, but we haven't bothered to provide
1598 * a hook there.
1599 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001600 error_setg(errp,
1601 "-mem-path not supported with this accelerator");
1602 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001603 }
1604
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001605 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001606 new_block = g_malloc0(sizeof(*new_block));
1607 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001608 new_block->used_length = size;
1609 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001610 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001611 new_block->host = file_ram_alloc(new_block, size,
1612 mem_path, errp);
1613 if (!new_block->host) {
1614 g_free(new_block);
1615 return -1;
1616 }
1617
Hu Taoef701d72014-09-09 13:27:54 +08001618 addr = ram_block_add(new_block, &local_err);
1619 if (local_err) {
1620 g_free(new_block);
1621 error_propagate(errp, local_err);
1622 return -1;
1623 }
1624 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001625}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001626#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001627
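/* Common back end of the qemu_ram_alloc_* variants: build a RAMBlock for a
 * fixed-size, resizeable or caller-supplied (host pointer) allocation and
 * hand it to ram_block_add(). */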
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001628static
1629ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1630 void (*resized)(const char*,
1631 uint64_t length,
1632 void *host),
1633 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001634 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001635{
1636 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001637 ram_addr_t addr;
1638 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001639
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001640 size = HOST_PAGE_ALIGN(size);
1641 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001642 new_block = g_malloc0(sizeof(*new_block));
1643 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001644 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001645 new_block->used_length = size;
1646 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001647 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001648 new_block->fd = -1;
1649 new_block->host = host;
1650 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001651 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001652 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001653 if (resizeable) {
1654 new_block->flags |= RAM_RESIZEABLE;
1655 }
Hu Taoef701d72014-09-09 13:27:54 +08001656 addr = ram_block_add(new_block, &local_err);
1657 if (local_err) {
1658 g_free(new_block);
1659 error_propagate(errp, local_err);
1660 return -1;
1661 }
1662 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001663}
1664
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001665ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1666 MemoryRegion *mr, Error **errp)
1667{
1668 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1669}
1670
Hu Taoef701d72014-09-09 13:27:54 +08001671ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001672{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001673 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1674}
1675
1676ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1677 void (*resized)(const char*,
1678 uint64_t length,
1679 void *host),
1680 MemoryRegion *mr, Error **errp)
1681{
1682 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001683}
bellarde9a1ab12007-02-08 23:08:38 +00001684
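/* RCU reclaim callback: release the block's memory according to how it was
 * allocated (preallocated host memory is untouched, Xen map cache entries
 * are invalidated, file-backed memory is unmapped and its fd closed,
 * anonymous memory is freed), then free the RAMBlock itself. */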
Paolo Bonzini43771532013-09-09 17:58:40 +02001685static void reclaim_ramblock(RAMBlock *block)
1686{
1687 if (block->flags & RAM_PREALLOC) {
1688 ;
1689 } else if (xen_enabled()) {
1690 xen_invalidate_map_cache_entry(block->host);
1691#ifndef _WIN32
1692 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001693 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001694 close(block->fd);
1695#endif
1696 } else {
1697 qemu_anon_ram_free(block->host, block->max_length);
1698 }
1699 g_free(block);
1700}
1701
Anthony Liguoric227f092009-10-01 16:12:16 -05001702void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001703{
Alex Williamson04b16652010-07-02 11:13:17 -06001704 RAMBlock *block;
1705
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001706 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001707 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001708 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001709 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001710 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001711 /* Write list before version */
1712 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001713 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001714 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001715 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001716 }
1717 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001718 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001719}
1720
Huang Yingcd19cfa2011-03-02 08:56:19 +01001721#ifndef _WIN32
1722void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1723{
1724 RAMBlock *block;
1725 ram_addr_t offset;
1726 int flags;
1727 void *area, *vaddr;
1728
Mike Day0dc3f442013-09-05 14:41:35 -04001729 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001730 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001731 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001732 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001733 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001734 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001735 } else if (xen_enabled()) {
1736 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001737 } else {
1738 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001739 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001740 flags |= (block->flags & RAM_SHARED ?
1741 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001742 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1743 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001744 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001745 /*
1746 * Remap needs to match alloc. Accelerators that
1747 * set phys_mem_alloc never remap. If they did,
1748 * we'd need a remap hook here.
1749 */
1750 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1751
Huang Yingcd19cfa2011-03-02 08:56:19 +01001752 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1753 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1754 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001755 }
1756 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001757 fprintf(stderr, "Could not remap addr: "
1758 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001759 length, addr);
1760 exit(1);
1761 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001762 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001763 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001764 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001765 }
1766 }
1767}
1768#endif /* !_WIN32 */
1769
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001770int qemu_get_ram_fd(ram_addr_t addr)
1771{
Mike Dayae3a7042013-09-05 14:41:35 -04001772 RAMBlock *block;
1773 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001774
Mike Day0dc3f442013-09-05 14:41:35 -04001775 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001776 block = qemu_get_ram_block(addr);
1777 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001778 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001779 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001780}
1781
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001782void qemu_set_ram_fd(ram_addr_t addr, int fd)
1783{
1784 RAMBlock *block;
1785
1786 rcu_read_lock();
1787 block = qemu_get_ram_block(addr);
1788 block->fd = fd;
1789 rcu_read_unlock();
1790}
1791
Damjan Marion3fd74b82014-06-26 23:01:32 +02001792void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1793{
Mike Dayae3a7042013-09-05 14:41:35 -04001794 RAMBlock *block;
1795 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001796
Mike Day0dc3f442013-09-05 14:41:35 -04001797 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001798 block = qemu_get_ram_block(addr);
1799 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001800 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001801 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001802}
1803
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001804/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001805 * This should not be used for general purpose DMA. Use address_space_map
1806 * or address_space_rw instead. For local memory (e.g. video ram) that the
1807 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001808 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001809 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001810 */
1811void *qemu_get_ram_ptr(ram_addr_t addr)
1812{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001813 RAMBlock *block = qemu_get_ram_block(addr);
Mike Dayae3a7042013-09-05 14:41:35 -04001814
1815 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001816 /* We need to check if the requested address is in the RAM
1817 * because we don't want to map the entire memory in QEMU.
1818 * In that case just map until the end of the page.
1819 */
1820 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001821 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001822 }
Mike Dayae3a7042013-09-05 14:41:35 -04001823
1824 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001825 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001826 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001827}
1828
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001829/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001830 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001831 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001832 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001833 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001834static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001835{
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001836 RAMBlock *block;
1837 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001838 if (*size == 0) {
1839 return NULL;
1840 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001841
1842 block = qemu_get_ram_block(addr);
1843 offset_inside_block = addr - block->offset;
1844 *size = MIN(*size, block->max_length - offset_inside_block);
1845
1846 if (xen_enabled() && block->host == NULL) {
1847 /* We need to check if the requested address is in the RAM
1848 * because we don't want to map the entire memory in QEMU.
1849 * In that case just map the requested area.
1850 */
1851 if (block->offset == 0) {
1852 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001853 }
1854
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001855 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001856 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001857
1858 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001859}
1860
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001861/*
1862 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1863 * in that RAMBlock.
1864 *
1865 * ptr: Host pointer to look up
1866 * round_offset: If true round the result offset down to a page boundary
1867 * *ram_addr: set to result ram_addr
1868 * *offset: set to result offset within the RAMBlock
1869 *
1870 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001871 *
1872 * By the time this function returns, the returned pointer is not protected
1873 * by RCU anymore. If the caller is not within an RCU critical section and
1874 * does not hold the iothread lock, it must have other means of protecting the
1875 * pointer, such as a reference to the region that includes the incoming
1876 * ram_addr_t.
1877 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001878RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1879 ram_addr_t *ram_addr,
1880 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001881{
pbrook94a6b542009-04-11 17:15:54 +00001882 RAMBlock *block;
1883 uint8_t *host = ptr;
1884
Jan Kiszka868bb332011-06-21 22:59:09 +02001885 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001886 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001887 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001888 block = qemu_get_ram_block(*ram_addr);
1889 if (block) {
1890 *offset = (host - block->host);
1891 }
Mike Day0dc3f442013-09-05 14:41:35 -04001892 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001893 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001894 }
1895
Mike Day0dc3f442013-09-05 14:41:35 -04001896 rcu_read_lock();
1897 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001898 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001899 goto found;
1900 }
1901
Mike Day0dc3f442013-09-05 14:41:35 -04001902 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001903        /* This case occurs when the block is not mapped. */
1904 if (block->host == NULL) {
1905 continue;
1906 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001907 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001908 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001909 }
pbrook94a6b542009-04-11 17:15:54 +00001910 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001911
Mike Day0dc3f442013-09-05 14:41:35 -04001912 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001913 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001914
1915found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001916 *offset = (host - block->host);
1917 if (round_offset) {
1918 *offset &= TARGET_PAGE_MASK;
1919 }
1920 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001921 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001922 return block;
1923}
1924
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001925/*
1926 * Finds the named RAMBlock
1927 *
1928 * name: The name of RAMBlock to find
1929 *
1930 * Returns: RAMBlock (or NULL if not found)
1931 */
1932RAMBlock *qemu_ram_block_by_name(const char *name)
1933{
1934 RAMBlock *block;
1935
1936 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1937 if (!strcmp(name, block->idstr)) {
1938 return block;
1939 }
1940 }
1941
1942 return NULL;
1943}
1944
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001945/* Some of the softmmu routines need to translate from a host pointer
1946 (typically a TLB entry) back to a ram offset. */
1947MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1948{
1949 RAMBlock *block;
1950 ram_addr_t offset; /* Not used */
1951
1952 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1953
1954 if (!block) {
1955 return NULL;
1956 }
1957
1958 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001959}
Alex Williamsonf471a172010-06-11 11:11:42 -06001960
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001961/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001962static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001963 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001964{
Juan Quintela52159192013-10-08 12:44:04 +02001965 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001966 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001967 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001968 switch (size) {
1969 case 1:
1970 stb_p(qemu_get_ram_ptr(ram_addr), val);
1971 break;
1972 case 2:
1973 stw_p(qemu_get_ram_ptr(ram_addr), val);
1974 break;
1975 case 4:
1976 stl_p(qemu_get_ram_ptr(ram_addr), val);
1977 break;
1978 default:
1979 abort();
1980 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001981 /* Set both VGA and migration bits for simplicity and to remove
1982 * the notdirty callback faster.
1983 */
1984 cpu_physical_memory_set_dirty_range(ram_addr, size,
1985 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001986 /* we remove the notdirty callback only if the code has been
1987 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001988 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001989 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001990 }
bellard1ccde1c2004-02-06 19:46:14 +00001991}
1992
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001993static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1994 unsigned size, bool is_write)
1995{
1996 return is_write;
1997}
1998
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001999static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002000 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002001 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002002 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002003};
2004
pbrook0f459d12008-06-09 00:20:13 +00002005/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002006static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002007{
Andreas Färber93afead2013-08-26 03:41:01 +02002008 CPUState *cpu = current_cpu;
2009 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002010 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002011 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002012 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002013 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002014
Andreas Färberff4700b2013-08-26 18:23:18 +02002015 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002016 /* We re-entered the check after replacing the TB. Now raise
 2017 * the debug interrupt so that it will trigger after the
2018 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002019 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002020 return;
2021 }
Andreas Färber93afead2013-08-26 03:41:01 +02002022 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002023 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002024 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2025 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002026 if (flags == BP_MEM_READ) {
2027 wp->flags |= BP_WATCHPOINT_HIT_READ;
2028 } else {
2029 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2030 }
2031 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002032 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002033 if (!cpu->watchpoint_hit) {
2034 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002035 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002036 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002037 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002038 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002039 } else {
2040 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002041 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002042 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002043 }
aliguori06d55cc2008-11-18 20:24:06 +00002044 }
aliguori6e140f22008-11-18 20:37:55 +00002045 } else {
2046 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002047 }
2048 }
2049}
2050
pbrook6658ffb2007-03-16 23:58:11 +00002051/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2052 so these check for a hit then pass through to the normal out-of-line
2053 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002054static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2055 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002056{
Peter Maydell66b9b432015-04-26 16:49:24 +01002057 MemTxResult res;
2058 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002059
Peter Maydell66b9b432015-04-26 16:49:24 +01002060 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002061 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002062 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002063 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002064 break;
2065 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002066 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002067 break;
2068 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002069 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002070 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002071 default: abort();
2072 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002073 *pdata = data;
2074 return res;
2075}
2076
2077static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2078 uint64_t val, unsigned size,
2079 MemTxAttrs attrs)
2080{
2081 MemTxResult res;
2082
2083 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2084 switch (size) {
2085 case 1:
2086 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2087 break;
2088 case 2:
2089 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2090 break;
2091 case 4:
2092 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2093 break;
2094 default: abort();
2095 }
2096 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002097}
2098
Avi Kivity1ec9b902012-01-02 12:47:48 +02002099static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002100 .read_with_attrs = watch_mem_read,
2101 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002102 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002103};
pbrook6658ffb2007-03-16 23:58:11 +00002104
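/* Sub-page accessors: forward each access to the owning address space at
 * subpage->base + addr, bouncing the value through a local buffer. */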
Peter Maydellf25a49e2015-04-26 16:49:24 +01002105static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2106 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002107{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002108 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002109 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002110 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002111
blueswir1db7b5422007-05-26 17:36:03 +00002112#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002113 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002114 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002115#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002116 res = address_space_read(subpage->as, addr + subpage->base,
2117 attrs, buf, len);
2118 if (res) {
2119 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002120 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002121 switch (len) {
2122 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002123 *data = ldub_p(buf);
2124 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002125 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002126 *data = lduw_p(buf);
2127 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002128 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002129 *data = ldl_p(buf);
2130 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002131 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002132 *data = ldq_p(buf);
2133 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002134 default:
2135 abort();
2136 }
blueswir1db7b5422007-05-26 17:36:03 +00002137}
2138
Peter Maydellf25a49e2015-04-26 16:49:24 +01002139static MemTxResult subpage_write(void *opaque, hwaddr addr,
2140 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002141{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002142 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002143 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002144
blueswir1db7b5422007-05-26 17:36:03 +00002145#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002146 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002147 " value %"PRIx64"\n",
2148 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002149#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002150 switch (len) {
2151 case 1:
2152 stb_p(buf, value);
2153 break;
2154 case 2:
2155 stw_p(buf, value);
2156 break;
2157 case 4:
2158 stl_p(buf, value);
2159 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002160 case 8:
2161 stq_p(buf, value);
2162 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002163 default:
2164 abort();
2165 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002166 return address_space_write(subpage->as, addr + subpage->base,
2167 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002168}
2169
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002170static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002171 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002172{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002173 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002174#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002175 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002176 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002177#endif
2178
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002179 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002180 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002181}
2182
Avi Kivity70c68e42012-01-02 12:32:48 +02002183static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002184 .read_with_attrs = subpage_read,
2185 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002186 .impl.min_access_size = 1,
2187 .impl.max_access_size = 8,
2188 .valid.min_access_size = 1,
2189 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002190 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002191 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002192};
2193
Anthony Liguoric227f092009-10-01 16:12:16 -05002194static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002195 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002196{
2197 int idx, eidx;
2198
2199 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2200 return -1;
2201 idx = SUBPAGE_IDX(start);
2202 eidx = SUBPAGE_IDX(end);
2203#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002204 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2205 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002206#endif
blueswir1db7b5422007-05-26 17:36:03 +00002207 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002208 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002209 }
2210
2211 return 0;
2212}
2213
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002214static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002215{
Anthony Liguoric227f092009-10-01 16:12:16 -05002216 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002217
Anthony Liguori7267c092011-08-20 22:09:37 -05002218 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002219
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002220 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002221 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002222 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002223 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002224 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002225#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002226 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2227 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002228#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002229 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002230
2231 return mmio;
2232}
2233
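/* Build a catch-all section covering the whole 64-bit range for one of the
 * fixed io memory regions and add it to @map. */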
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002234static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2235 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002236{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002237 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002238 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002239 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002240 .mr = mr,
2241 .offset_within_address_space = 0,
2242 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002243 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002244 };
2245
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002246 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002247}
2248
Peter Maydella54c87b2016-01-21 14:15:05 +00002249MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002250{
Peter Maydella54c87b2016-01-21 14:15:05 +00002251 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2252 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002253 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002254 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002255
2256 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002257}
2258
Avi Kivitye9179ce2009-06-14 11:38:52 +03002259static void io_mem_init(void)
2260{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002261 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002262 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002263 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002264 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002265 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002266 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002267 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002268}
2269
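/* Start building a new dispatch table for @as. The first four sections
 * added must be the fixed dummies, in the order asserted below, so that
 * their indexes match the PHYS_SECTION_* constants. */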
Avi Kivityac1970f2012-10-03 16:22:53 +02002270static void mem_begin(MemoryListener *listener)
2271{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002272 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002273 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2274 uint16_t n;
2275
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002276 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002277 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002278 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002279 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002280 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002281 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002282 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002283 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002284
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002285 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002286 d->as = as;
2287 as->next_dispatch = d;
2288}
2289
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002290static void address_space_dispatch_free(AddressSpaceDispatch *d)
2291{
2292 phys_sections_free(&d->map);
2293 g_free(d);
2294}
2295
Paolo Bonzini00752702013-05-29 12:13:54 +02002296static void mem_commit(MemoryListener *listener)
2297{
2298 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002299 AddressSpaceDispatch *cur = as->dispatch;
2300 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002301
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002302 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002303
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002304 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002305 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002306 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002307 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002308}
2309
Avi Kivity1d711482012-10-02 18:54:45 +02002310static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002311{
Peter Maydell32857f42015-10-01 15:29:50 +01002312 CPUAddressSpace *cpuas;
2313 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002314
 2315 /* Since each CPU stores RAM addresses in its TLB cache, we must
 2316 reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002317 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2318 cpu_reloading_memory_map();
2319 /* The CPU and TLB are protected by the iothread lock.
2320 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2321 * may have split the RCU critical section.
2322 */
2323 d = atomic_rcu_read(&cpuas->as->dispatch);
2324 cpuas->memory_dispatch = d;
2325 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002326}
2327
Avi Kivityac1970f2012-10-03 16:22:53 +02002328void address_space_init_dispatch(AddressSpace *as)
2329{
Paolo Bonzini00752702013-05-29 12:13:54 +02002330 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002331 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002332 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002333 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002334 .region_add = mem_add,
2335 .region_nop = mem_add,
2336 .priority = 0,
2337 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002338 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002339}
2340
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002341void address_space_unregister(AddressSpace *as)
2342{
2343 memory_listener_unregister(&as->dispatch_listener);
2344}
2345
Avi Kivity83f3c252012-10-07 12:59:55 +02002346void address_space_destroy_dispatch(AddressSpace *as)
2347{
2348 AddressSpaceDispatch *d = as->dispatch;
2349
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002350 atomic_rcu_set(&as->dispatch, NULL);
2351 if (d) {
2352 call_rcu(d, address_space_dispatch_free, rcu);
2353 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002354}
2355
Avi Kivity62152b82011-07-26 14:26:14 +03002356static void memory_map_init(void)
2357{
Anthony Liguori7267c092011-08-20 22:09:37 -05002358 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002359
Paolo Bonzini57271d62013-11-07 17:14:37 +01002360 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002361 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002362
Anthony Liguori7267c092011-08-20 22:09:37 -05002363 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002364 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2365 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002366 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002367}
2368
2369MemoryRegion *get_system_memory(void)
2370{
2371 return system_memory;
2372}
2373
Avi Kivity309cb472011-08-08 16:09:03 +03002374MemoryRegion *get_system_io(void)
2375{
2376 return system_io;
2377}
2378
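/*
 * Illustrative sketch (not part of this file): how board code typically plugs
 * a RAM region into the address space returned by get_system_memory(). The
 * name "machine_ram", the 128 MiB size and the use of error_fatal are
 * assumptions for the example; the memory_region_* calls are the standard
 * MemoryRegion API as used elsewhere in this tree.
 */
#if 0
static void example_board_ram_init(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* Allocate host memory backing a 128 MiB guest-physical RAM region. */
    memory_region_init_ram(ram, NULL, "machine_ram", 128 * 1024 * 1024,
                           &error_fatal);
    vmstate_register_ram_global(ram);
    /* Map it at guest-physical address 0 inside the system memory region. */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif
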
pbrooke2eef172008-06-08 01:09:01 +00002379#endif /* !defined(CONFIG_USER_ONLY) */
2380
bellard13eb76e2004-01-24 15:23:36 +00002381/* physical memory access (slow version, mainly for debug) */
2382#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002383int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002384 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002385{
2386 int l, flags;
2387 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002388 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002389
2390 while (len > 0) {
2391 page = addr & TARGET_PAGE_MASK;
2392 l = (page + TARGET_PAGE_SIZE) - addr;
2393 if (l > len)
2394 l = len;
2395 flags = page_get_flags(page);
2396 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002397 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002398 if (is_write) {
2399 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002400 return -1;
bellard579a97f2007-11-11 14:26:47 +00002401 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002402 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002403 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002404 memcpy(p, buf, l);
2405 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002406 } else {
2407 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002408 return -1;
bellard579a97f2007-11-11 14:26:47 +00002409 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002410 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002411 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002412 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002413 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002414 }
2415 len -= l;
2416 buf += l;
2417 addr += l;
2418 }
Paul Brooka68fe892010-03-01 00:08:59 +00002419 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002420}
bellard8df1cd02005-01-28 22:37:22 +00002421
bellard13eb76e2004-01-24 15:23:36 +00002422#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002423
Paolo Bonzini845b6212015-03-23 11:45:53 +01002424static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002425 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002426{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002427 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2428 /* No early return if dirty_log_mask is or becomes 0, because
2429 * cpu_physical_memory_set_dirty_range will still call
2430 * xen_modified_memory.
2431 */
2432 if (dirty_log_mask) {
2433 dirty_log_mask =
2434 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002435 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002436 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2437 tb_invalidate_phys_range(addr, addr + length);
2438 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2439 }
2440 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002441}
2442
Richard Henderson23326162013-07-08 14:55:59 -07002443static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002444{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002445 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002446
2447 /* Regions are assumed to support 1-4 byte accesses unless
2448 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002449 if (access_size_max == 0) {
2450 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002451 }
Richard Henderson23326162013-07-08 14:55:59 -07002452
2453 /* Bound the maximum access by the alignment of the address. */
2454 if (!mr->ops->impl.unaligned) {
2455 unsigned align_size_max = addr & -addr;
2456 if (align_size_max != 0 && align_size_max < access_size_max) {
2457 access_size_max = align_size_max;
2458 }
2459 }
2460
2461 /* Don't attempt accesses larger than the maximum. */
2462 if (l > access_size_max) {
2463 l = access_size_max;
2464 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002465 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002466
2467 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002468}
2469
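/*
 * Worked example for the clamping above (illustrative numbers only): with
 * ops->valid.max_access_size == 8, impl.unaligned == false, l == 8 and
 * addr == 0x1002, the alignment bound is 0x1002 & -0x1002 == 2, so the
 * access is clamped to pow2floor(2) == 2 bytes and the caller issues the
 * remaining bytes as further, separate accesses.
 */
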
Jan Kiszka4840f102015-06-18 18:47:22 +02002470static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002471{
Jan Kiszka4840f102015-06-18 18:47:22 +02002472 bool unlocked = !qemu_mutex_iothread_locked();
2473 bool release_lock = false;
2474
2475 if (unlocked && mr->global_locking) {
2476 qemu_mutex_lock_iothread();
2477 unlocked = false;
2478 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002479 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002480 if (mr->flush_coalesced_mmio) {
2481 if (unlocked) {
2482 qemu_mutex_lock_iothread();
2483 }
2484 qemu_flush_coalesced_mmio_buffer();
2485 if (unlocked) {
2486 qemu_mutex_unlock_iothread();
2487 }
2488 }
2489
2490 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002491}
2492
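/*
 * Illustrative fragment showing the caller pattern for prepare_mmio_access()
 * used by the accessors below: the helper may take the iothread lock, and the
 * caller drops it once the MMIO dispatch has completed.
 */
#if 0
    release_lock |= prepare_mmio_access(mr);
    l = memory_access_size(mr, l, addr1);
    /* ... memory_region_dispatch_read()/memory_region_dispatch_write() ... */
    if (release_lock) {
        qemu_mutex_unlock_iothread();
        release_lock = false;
    }
#endif
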
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002493/* Called within RCU critical section. */
2494static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2495 MemTxAttrs attrs,
2496 const uint8_t *buf,
2497 int len, hwaddr addr1,
2498 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002499{
bellard13eb76e2004-01-24 15:23:36 +00002500 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002501 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002502 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002503 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002504
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002505 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002506 if (!memory_access_is_direct(mr, true)) {
2507 release_lock |= prepare_mmio_access(mr);
2508 l = memory_access_size(mr, l, addr1);
2509 /* XXX: could force current_cpu to NULL to avoid
2510 potential bugs */
2511 switch (l) {
2512 case 8:
2513 /* 64 bit write access */
2514 val = ldq_p(buf);
2515 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2516 attrs);
2517 break;
2518 case 4:
2519 /* 32 bit write access */
2520 val = ldl_p(buf);
2521 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2522 attrs);
2523 break;
2524 case 2:
2525 /* 16 bit write access */
2526 val = lduw_p(buf);
2527 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2528 attrs);
2529 break;
2530 case 1:
2531 /* 8 bit write access */
2532 val = ldub_p(buf);
2533 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2534 attrs);
2535 break;
2536 default:
2537 abort();
bellard13eb76e2004-01-24 15:23:36 +00002538 }
2539 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002540 addr1 += memory_region_get_ram_addr(mr);
2541 /* RAM case */
2542 ptr = qemu_get_ram_ptr(addr1);
2543 memcpy(ptr, buf, l);
2544 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002545 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002546
2547 if (release_lock) {
2548 qemu_mutex_unlock_iothread();
2549 release_lock = false;
2550 }
2551
bellard13eb76e2004-01-24 15:23:36 +00002552 len -= l;
2553 buf += l;
2554 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002555
2556 if (!len) {
2557 break;
2558 }
2559
2560 l = len;
2561 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002562 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002563
Peter Maydell3b643492015-04-26 16:49:23 +01002564 return result;
bellard13eb76e2004-01-24 15:23:36 +00002565}
bellard8df1cd02005-01-28 22:37:22 +00002566
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002567MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2568 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002569{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002570 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002571 hwaddr addr1;
2572 MemoryRegion *mr;
2573 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002574
2575 if (len > 0) {
2576 rcu_read_lock();
2577 l = len;
2578 mr = address_space_translate(as, addr, &addr1, &l, true);
2579 result = address_space_write_continue(as, addr, attrs, buf, len,
2580 addr1, l, mr);
2581 rcu_read_unlock();
2582 }
2583
2584 return result;
2585}
2586
2587/* Called within RCU critical section. */
2588MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2589 MemTxAttrs attrs, uint8_t *buf,
2590 int len, hwaddr addr1, hwaddr l,
2591 MemoryRegion *mr)
2592{
2593 uint8_t *ptr;
2594 uint64_t val;
2595 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002596 bool release_lock = false;
2597
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002598 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002599 if (!memory_access_is_direct(mr, false)) {
2600 /* I/O case */
2601 release_lock |= prepare_mmio_access(mr);
2602 l = memory_access_size(mr, l, addr1);
2603 switch (l) {
2604 case 8:
2605 /* 64 bit read access */
2606 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2607 attrs);
2608 stq_p(buf, val);
2609 break;
2610 case 4:
2611 /* 32 bit read access */
2612 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2613 attrs);
2614 stl_p(buf, val);
2615 break;
2616 case 2:
2617 /* 16 bit read access */
2618 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2619 attrs);
2620 stw_p(buf, val);
2621 break;
2622 case 1:
2623 /* 8 bit read access */
2624 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2625 attrs);
2626 stb_p(buf, val);
2627 break;
2628 default:
2629 abort();
2630 }
2631 } else {
2632 /* RAM case */
2633 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2634 memcpy(buf, ptr, l);
2635 }
2636
2637 if (release_lock) {
2638 qemu_mutex_unlock_iothread();
2639 release_lock = false;
2640 }
2641
2642 len -= l;
2643 buf += l;
2644 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002645
2646 if (!len) {
2647 break;
2648 }
2649
2650 l = len;
2651 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002652 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002653
2654 return result;
2655}
2656
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002657MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2658 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002659{
2660 hwaddr l;
2661 hwaddr addr1;
2662 MemoryRegion *mr;
2663 MemTxResult result = MEMTX_OK;
2664
2665 if (len > 0) {
2666 rcu_read_lock();
2667 l = len;
2668 mr = address_space_translate(as, addr, &addr1, &l, false);
2669 result = address_space_read_continue(as, addr, attrs, buf, len,
2670 addr1, l, mr);
2671 rcu_read_unlock();
2672 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002673
2674 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002675}
2676
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002677MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2678 uint8_t *buf, int len, bool is_write)
2679{
2680 if (is_write) {
2681 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2682 } else {
2683 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2684 }
2685}
Avi Kivityac1970f2012-10-03 16:22:53 +02002686
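/*
 * Illustrative usage sketch (not part of this file): a device model doing a
 * bounded guest-physical write and checking for a failed transaction. The
 * names "as", "gpa" and the data pattern are assumptions for the example.
 */
#if 0
static void example_dma_write(AddressSpace *as, hwaddr gpa)
{
    uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };
    MemTxResult res;

    res = address_space_rw(as, gpa, MEMTXATTRS_UNSPECIFIED,
                           data, sizeof(data), true);
    if (res != MEMTX_OK) {
        /* The write hit unassigned memory or a device signalled an error. */
    }
}
#endif
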
Avi Kivitya8170e52012-10-23 12:30:10 +02002687void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002688 int len, int is_write)
2689{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002690 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2691 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002692}
2693
Alexander Graf582b55a2013-12-11 14:17:44 +01002694enum write_rom_type {
2695 WRITE_DATA,
2696 FLUSH_CACHE,
2697};
2698
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002699static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002700 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002701{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002702 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002703 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002704 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002705 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002706
Paolo Bonzini41063e12015-03-18 14:21:43 +01002707 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002708 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002709 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002710 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002711
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002712 if (!(memory_region_is_ram(mr) ||
2713 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002714 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002715 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002716 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002717 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002718 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002719 switch (type) {
2720 case WRITE_DATA:
2721 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002722 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002723 break;
2724 case FLUSH_CACHE:
2725 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2726 break;
2727 }
bellardd0ecd2a2006-04-23 17:14:48 +00002728 }
2729 len -= l;
2730 buf += l;
2731 addr += l;
2732 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002733 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002734}
2735
Alexander Graf582b55a2013-12-11 14:17:44 +01002736/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002737void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002738 const uint8_t *buf, int len)
2739{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002740 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002741}
2742
2743void cpu_flush_icache_range(hwaddr start, int len)
2744{
2745 /*
2746 * This function should do the same thing as an icache flush that was
2747 * triggered from within the guest. For TCG we are always cache coherent,
2748 * so there is no need to flush anything. For KVM / Xen we need to flush
2749 * the host's instruction cache at least.
2750 */
2751 if (tcg_enabled()) {
2752 return;
2753 }
2754
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002755 cpu_physical_memory_write_rom_internal(&address_space_memory,
2756 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002757}
2758
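/*
 * Illustrative sketch (not part of this file): after patching guest code in
 * RAM with cpu_physical_memory_write_rom(), a caller running under KVM or Xen
 * flushes the host instruction cache for the patched range. "addr",
 * "trampoline" and "len" are assumptions for the example.
 */
#if 0
static void example_patch_guest_code(hwaddr addr,
                                     const uint8_t *trampoline, int len)
{
    cpu_physical_memory_write_rom(&address_space_memory, addr, trampoline, len);
    cpu_flush_icache_range(addr, len);
}
#endif
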
aliguori6d16c2f2009-01-22 16:59:11 +00002759typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002760 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002761 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002762 hwaddr addr;
2763 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002764 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002765} BounceBuffer;
2766
2767static BounceBuffer bounce;
2768
aliguoriba223c22009-01-22 16:59:16 +00002769typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002770 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002771 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002772} MapClient;
2773
Fam Zheng38e047b2015-03-16 17:03:35 +08002774QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002775static QLIST_HEAD(map_client_list, MapClient) map_client_list
2776 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002777
Fam Zhenge95205e2015-03-16 17:03:37 +08002778static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002779{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002780 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002781 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002782}
2783
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002784static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002785{
2786 MapClient *client;
2787
Blue Swirl72cf2d42009-09-12 07:36:22 +00002788 while (!QLIST_EMPTY(&map_client_list)) {
2789 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002790 qemu_bh_schedule(client->bh);
2791 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002792 }
2793}
2794
Fam Zhenge95205e2015-03-16 17:03:37 +08002795void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002796{
2797 MapClient *client = g_malloc(sizeof(*client));
2798
Fam Zheng38e047b2015-03-16 17:03:35 +08002799 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002800 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002801 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002802 if (!atomic_read(&bounce.in_use)) {
2803 cpu_notify_map_clients_locked();
2804 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002805 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002806}
2807
Fam Zheng38e047b2015-03-16 17:03:35 +08002808void cpu_exec_init_all(void)
2809{
2810 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002811 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002812 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002813 qemu_mutex_init(&map_client_list_lock);
2814}
2815
Fam Zhenge95205e2015-03-16 17:03:37 +08002816void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002817{
Fam Zhenge95205e2015-03-16 17:03:37 +08002818 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002819
Fam Zhenge95205e2015-03-16 17:03:37 +08002820 qemu_mutex_lock(&map_client_list_lock);
2821 QLIST_FOREACH(client, &map_client_list, link) {
2822 if (client->bh == bh) {
2823 cpu_unregister_map_client_do(client);
2824 break;
2825 }
2826 }
2827 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002828}
2829
2830static void cpu_notify_map_clients(void)
2831{
Fam Zheng38e047b2015-03-16 17:03:35 +08002832 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002833 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002834 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002835}
2836
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002837bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2838{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002839 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002840 hwaddr l, xlat;
2841
Paolo Bonzini41063e12015-03-18 14:21:43 +01002842 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002843 while (len > 0) {
2844 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002845 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2846 if (!memory_access_is_direct(mr, is_write)) {
2847 l = memory_access_size(mr, l, addr);
2848 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002849 return false;
2850 }
2851 }
2852
2853 len -= l;
2854 addr += l;
2855 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002856 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002857 return true;
2858}
2859
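/*
 * Illustrative sketch (not part of this file): checking that a DMA window is
 * fully accessible before issuing the transfer. "as", "dma_addr" and
 * "dma_len" are assumptions for the example.
 */
#if 0
static bool example_dma_precheck(AddressSpace *as, hwaddr dma_addr, int dma_len)
{
    if (!address_space_access_valid(as, dma_addr, dma_len, true)) {
        /* Report a DMA error to the guest instead of performing the write. */
        return false;
    }
    return true;
}
#endif
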
aliguori6d16c2f2009-01-22 16:59:11 +00002860/* Map a physical memory region into a host virtual address.
2861 * May map a subset of the requested range, given by and returned in *plen.
2862 * May return NULL if resources needed to perform the mapping are exhausted.
2863 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002864 * Use cpu_register_map_client() to know when retrying the map operation is
2865 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002866 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002867void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002868 hwaddr addr,
2869 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002870 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002871{
Avi Kivitya8170e52012-10-23 12:30:10 +02002872 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002873 hwaddr done = 0;
2874 hwaddr l, xlat, base;
2875 MemoryRegion *mr, *this_mr;
2876 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002877 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002878
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002879 if (len == 0) {
2880 return NULL;
2881 }
aliguori6d16c2f2009-01-22 16:59:11 +00002882
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002883 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002884 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002885 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002886
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002887 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002888 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002889 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002890 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002891 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002892 /* Avoid unbounded allocations */
2893 l = MIN(l, TARGET_PAGE_SIZE);
2894 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002895 bounce.addr = addr;
2896 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002897
2898 memory_region_ref(mr);
2899 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002900 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002901 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2902 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002903 }
aliguori6d16c2f2009-01-22 16:59:11 +00002904
Paolo Bonzini41063e12015-03-18 14:21:43 +01002905 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002906 *plen = l;
2907 return bounce.buffer;
2908 }
2909
2910 base = xlat;
2911 raddr = memory_region_get_ram_addr(mr);
2912
2913 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002914 len -= l;
2915 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002916 done += l;
2917 if (len == 0) {
2918 break;
2919 }
2920
2921 l = len;
2922 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2923 if (this_mr != mr || xlat != base + done) {
2924 break;
2925 }
aliguori6d16c2f2009-01-22 16:59:11 +00002926 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002927
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002928 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002929 *plen = done;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002930 ptr = qemu_ram_ptr_length(raddr + base, plen);
2931 rcu_read_unlock();
2932
2933 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002934}
2935
Avi Kivityac1970f2012-10-03 16:22:53 +02002936/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002937 * Will also mark the memory as dirty if is_write == 1. access_len gives
2938 * the amount of memory that was actually read or written by the caller.
2939 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002940void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2941 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002942{
2943 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002944 MemoryRegion *mr;
2945 ram_addr_t addr1;
2946
2947 mr = qemu_ram_addr_from_host(buffer, &addr1);
2948 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002949 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002950 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002951 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002952 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002953 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002954 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002955 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002956 return;
2957 }
2958 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002959 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2960 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002961 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002962 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002963 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002964 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002965 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002966 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002967}
bellardd0ecd2a2006-04-23 17:14:48 +00002968
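/*
 * Illustrative sketch (not part of this file): zero-copy access to guest
 * memory with the map/unmap pair, falling back to a bottom-half retry when
 * the bounce buffer is busy. "as", "gpa", "need" and "retry_bh" are
 * assumptions for the example.
 */
#if 0
static void example_zero_guest_buffer(AddressSpace *as, hwaddr gpa,
                                      hwaddr need, QEMUBH *retry_bh)
{
    hwaddr len = need;
    void *host = address_space_map(as, gpa, &len, true);

    if (!host) {
        /* Mapping resources (the bounce buffer) are busy: retry from a BH. */
        cpu_register_map_client(retry_bh);
        return;
    }
    memset(host, 0, len);                    /* len may be less than "need" */
    address_space_unmap(as, host, len, true, len);
}
#endif
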
Avi Kivitya8170e52012-10-23 12:30:10 +02002969void *cpu_physical_memory_map(hwaddr addr,
2970 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002971 int is_write)
2972{
2973 return address_space_map(&address_space_memory, addr, plen, is_write);
2974}
2975
Avi Kivitya8170e52012-10-23 12:30:10 +02002976void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2977 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002978{
2979 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2980}
2981
bellard8df1cd02005-01-28 22:37:22 +00002982/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002983static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2984 MemTxAttrs attrs,
2985 MemTxResult *result,
2986 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002987{
bellard8df1cd02005-01-28 22:37:22 +00002988 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002989 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002990 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002991 hwaddr l = 4;
2992 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002993 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002994 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002995
Paolo Bonzini41063e12015-03-18 14:21:43 +01002996 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002997 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002998 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002999 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003000
bellard8df1cd02005-01-28 22:37:22 +00003001 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003002 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003003#if defined(TARGET_WORDS_BIGENDIAN)
3004 if (endian == DEVICE_LITTLE_ENDIAN) {
3005 val = bswap32(val);
3006 }
3007#else
3008 if (endian == DEVICE_BIG_ENDIAN) {
3009 val = bswap32(val);
3010 }
3011#endif
bellard8df1cd02005-01-28 22:37:22 +00003012 } else {
3013 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003014 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003015 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003016 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003017 switch (endian) {
3018 case DEVICE_LITTLE_ENDIAN:
3019 val = ldl_le_p(ptr);
3020 break;
3021 case DEVICE_BIG_ENDIAN:
3022 val = ldl_be_p(ptr);
3023 break;
3024 default:
3025 val = ldl_p(ptr);
3026 break;
3027 }
Peter Maydell50013112015-04-26 16:49:24 +01003028 r = MEMTX_OK;
3029 }
3030 if (result) {
3031 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003032 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003033 if (release_lock) {
3034 qemu_mutex_unlock_iothread();
3035 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003036 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003037 return val;
3038}
3039
Peter Maydell50013112015-04-26 16:49:24 +01003040uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3041 MemTxAttrs attrs, MemTxResult *result)
3042{
3043 return address_space_ldl_internal(as, addr, attrs, result,
3044 DEVICE_NATIVE_ENDIAN);
3045}
3046
3047uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3048 MemTxAttrs attrs, MemTxResult *result)
3049{
3050 return address_space_ldl_internal(as, addr, attrs, result,
3051 DEVICE_LITTLE_ENDIAN);
3052}
3053
3054uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3055 MemTxAttrs attrs, MemTxResult *result)
3056{
3057 return address_space_ldl_internal(as, addr, attrs, result,
3058 DEVICE_BIG_ENDIAN);
3059}
3060
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003061uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003062{
Peter Maydell50013112015-04-26 16:49:24 +01003063 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003064}
3065
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003066uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003067{
Peter Maydell50013112015-04-26 16:49:24 +01003068 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003069}
3070
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003071uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003072{
Peter Maydell50013112015-04-26 16:49:24 +01003073 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003074}
3075
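/*
 * Illustrative sketch (not part of this file): reading a little-endian 32-bit
 * descriptor field from guest memory and checking the transaction result.
 * "as" and "desc_addr" are assumptions for the example.
 */
#if 0
static uint32_t example_read_desc_flags(AddressSpace *as, hwaddr desc_addr)
{
    MemTxResult res;
    uint32_t flags = address_space_ldl_le(as, desc_addr,
                                          MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        flags = 0;       /* treat the descriptor as invalid */
    }
    return flags;
}
#endif
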
bellard84b7b8e2005-11-28 21:19:04 +00003076/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003077static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3078 MemTxAttrs attrs,
3079 MemTxResult *result,
3080 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003081{
bellard84b7b8e2005-11-28 21:19:04 +00003082 uint8_t *ptr;
3083 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003084 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003085 hwaddr l = 8;
3086 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003087 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003088 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003089
Paolo Bonzini41063e12015-03-18 14:21:43 +01003090 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003091 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003092 false);
3093 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003094 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003095
bellard84b7b8e2005-11-28 21:19:04 +00003096 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003097 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003098#if defined(TARGET_WORDS_BIGENDIAN)
3099 if (endian == DEVICE_LITTLE_ENDIAN) {
3100 val = bswap64(val);
3101 }
3102#else
3103 if (endian == DEVICE_BIG_ENDIAN) {
3104 val = bswap64(val);
3105 }
3106#endif
bellard84b7b8e2005-11-28 21:19:04 +00003107 } else {
3108 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003109 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003110 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003111 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003112 switch (endian) {
3113 case DEVICE_LITTLE_ENDIAN:
3114 val = ldq_le_p(ptr);
3115 break;
3116 case DEVICE_BIG_ENDIAN:
3117 val = ldq_be_p(ptr);
3118 break;
3119 default:
3120 val = ldq_p(ptr);
3121 break;
3122 }
Peter Maydell50013112015-04-26 16:49:24 +01003123 r = MEMTX_OK;
3124 }
3125 if (result) {
3126 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003127 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003128 if (release_lock) {
3129 qemu_mutex_unlock_iothread();
3130 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003131 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003132 return val;
3133}
3134
Peter Maydell50013112015-04-26 16:49:24 +01003135uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3136 MemTxAttrs attrs, MemTxResult *result)
3137{
3138 return address_space_ldq_internal(as, addr, attrs, result,
3139 DEVICE_NATIVE_ENDIAN);
3140}
3141
3142uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3143 MemTxAttrs attrs, MemTxResult *result)
3144{
3145 return address_space_ldq_internal(as, addr, attrs, result,
3146 DEVICE_LITTLE_ENDIAN);
3147}
3148
3149uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3150 MemTxAttrs attrs, MemTxResult *result)
3151{
3152 return address_space_ldq_internal(as, addr, attrs, result,
3153 DEVICE_BIG_ENDIAN);
3154}
3155
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003156uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003157{
Peter Maydell50013112015-04-26 16:49:24 +01003158 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003159}
3160
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003161uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003162{
Peter Maydell50013112015-04-26 16:49:24 +01003163 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003164}
3165
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003166uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003167{
Peter Maydell50013112015-04-26 16:49:24 +01003168 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003169}
3170
bellardaab33092005-10-30 20:48:42 +00003171/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003172uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3173 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003174{
3175 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003176 MemTxResult r;
3177
3178 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3179 if (result) {
3180 *result = r;
3181 }
bellardaab33092005-10-30 20:48:42 +00003182 return val;
3183}
3184
Peter Maydell50013112015-04-26 16:49:24 +01003185uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3186{
3187 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3188}
3189
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003190/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003191static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3192 hwaddr addr,
3193 MemTxAttrs attrs,
3194 MemTxResult *result,
3195 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003196{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003197 uint8_t *ptr;
3198 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003199 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003200 hwaddr l = 2;
3201 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003202 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003203 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003204
Paolo Bonzini41063e12015-03-18 14:21:43 +01003205 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003206 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003207 false);
3208 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003209 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003210
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003211 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003212 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003213#if defined(TARGET_WORDS_BIGENDIAN)
3214 if (endian == DEVICE_LITTLE_ENDIAN) {
3215 val = bswap16(val);
3216 }
3217#else
3218 if (endian == DEVICE_BIG_ENDIAN) {
3219 val = bswap16(val);
3220 }
3221#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003222 } else {
3223 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003224 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003225 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003226 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003227 switch (endian) {
3228 case DEVICE_LITTLE_ENDIAN:
3229 val = lduw_le_p(ptr);
3230 break;
3231 case DEVICE_BIG_ENDIAN:
3232 val = lduw_be_p(ptr);
3233 break;
3234 default:
3235 val = lduw_p(ptr);
3236 break;
3237 }
Peter Maydell50013112015-04-26 16:49:24 +01003238 r = MEMTX_OK;
3239 }
3240 if (result) {
3241 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003242 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003243 if (release_lock) {
3244 qemu_mutex_unlock_iothread();
3245 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003246 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003247 return val;
bellardaab33092005-10-30 20:48:42 +00003248}
3249
Peter Maydell50013112015-04-26 16:49:24 +01003250uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3251 MemTxAttrs attrs, MemTxResult *result)
3252{
3253 return address_space_lduw_internal(as, addr, attrs, result,
3254 DEVICE_NATIVE_ENDIAN);
3255}
3256
3257uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3258 MemTxAttrs attrs, MemTxResult *result)
3259{
3260 return address_space_lduw_internal(as, addr, attrs, result,
3261 DEVICE_LITTLE_ENDIAN);
3262}
3263
3264uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3265 MemTxAttrs attrs, MemTxResult *result)
3266{
3267 return address_space_lduw_internal(as, addr, attrs, result,
3268 DEVICE_BIG_ENDIAN);
3269}
3270
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003271uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003272{
Peter Maydell50013112015-04-26 16:49:24 +01003273 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003274}
3275
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003276uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003277{
Peter Maydell50013112015-04-26 16:49:24 +01003278 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003279}
3280
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003281uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003282{
Peter Maydell50013112015-04-26 16:49:24 +01003283 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003284}
3285
bellard8df1cd02005-01-28 22:37:22 +00003286/* warning: addr must be aligned. The RAM page is not marked as dirty
 3287 and the code inside is not invalidated. This is useful if the dirty
 3288 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003289void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3290 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003291{
bellard8df1cd02005-01-28 22:37:22 +00003292 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003293 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003294 hwaddr l = 4;
3295 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003296 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003297 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003298 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003299
Paolo Bonzini41063e12015-03-18 14:21:43 +01003300 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003301 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003302 true);
3303 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003304 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003305
Peter Maydell50013112015-04-26 16:49:24 +01003306 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003307 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003308 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003309 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003310 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003311
Paolo Bonzini845b6212015-03-23 11:45:53 +01003312 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3313 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003314 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003315 r = MEMTX_OK;
3316 }
3317 if (result) {
3318 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003319 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003320 if (release_lock) {
3321 qemu_mutex_unlock_iothread();
3322 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003323 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003324}
3325
Peter Maydell50013112015-04-26 16:49:24 +01003326void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3327{
3328 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3329}
3330
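/*
 * Illustrative sketch (not part of this file): the notdirty variant is meant
 * for callers that update accessed/dirty bits in a guest PTE and account for
 * those updates themselves, as the comment above describes. "as", "pte_addr"
 * and "accessed_bit" are assumptions for the example.
 */
#if 0
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    /* No dirty-bitmap update and no TB invalidation happen here. */
    stl_phys_notdirty(as, pte_addr, pte | accessed_bit);
}
#endif
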
bellard8df1cd02005-01-28 22:37:22 +00003331/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003332static inline void address_space_stl_internal(AddressSpace *as,
3333 hwaddr addr, uint32_t val,
3334 MemTxAttrs attrs,
3335 MemTxResult *result,
3336 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003337{
bellard8df1cd02005-01-28 22:37:22 +00003338 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003339 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003340 hwaddr l = 4;
3341 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003342 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003343 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003344
Paolo Bonzini41063e12015-03-18 14:21:43 +01003345 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003346 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003347 true);
3348 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003349 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003350
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003351#if defined(TARGET_WORDS_BIGENDIAN)
3352 if (endian == DEVICE_LITTLE_ENDIAN) {
3353 val = bswap32(val);
3354 }
3355#else
3356 if (endian == DEVICE_BIG_ENDIAN) {
3357 val = bswap32(val);
3358 }
3359#endif
Peter Maydell50013112015-04-26 16:49:24 +01003360 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003361 } else {
bellard8df1cd02005-01-28 22:37:22 +00003362 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003363 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003364 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003365 switch (endian) {
3366 case DEVICE_LITTLE_ENDIAN:
3367 stl_le_p(ptr, val);
3368 break;
3369 case DEVICE_BIG_ENDIAN:
3370 stl_be_p(ptr, val);
3371 break;
3372 default:
3373 stl_p(ptr, val);
3374 break;
3375 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003376 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003377 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003378 }
Peter Maydell50013112015-04-26 16:49:24 +01003379 if (result) {
3380 *result = r;
3381 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003382 if (release_lock) {
3383 qemu_mutex_unlock_iothread();
3384 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003385 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003386}
3387
3388void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3389 MemTxAttrs attrs, MemTxResult *result)
3390{
3391 address_space_stl_internal(as, addr, val, attrs, result,
3392 DEVICE_NATIVE_ENDIAN);
3393}
3394
3395void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3396 MemTxAttrs attrs, MemTxResult *result)
3397{
3398 address_space_stl_internal(as, addr, val, attrs, result,
3399 DEVICE_LITTLE_ENDIAN);
3400}
3401
3402void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3403 MemTxAttrs attrs, MemTxResult *result)
3404{
3405 address_space_stl_internal(as, addr, val, attrs, result,
3406 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003407}
3408
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003409void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003410{
Peter Maydell50013112015-04-26 16:49:24 +01003411 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003412}
3413
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003414void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003415{
Peter Maydell50013112015-04-26 16:49:24 +01003416 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003417}
3418
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003419void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003420{
Peter Maydell50013112015-04-26 16:49:24 +01003421 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003422}
3423
bellardaab33092005-10-30 20:48:42 +00003424/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003425void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3426 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003427{
3428 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003429 MemTxResult r;
3430
3431 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3432 if (result) {
3433 *result = r;
3434 }
3435}
3436
3437void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3438{
3439 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003440}
3441
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003442/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003443static inline void address_space_stw_internal(AddressSpace *as,
3444 hwaddr addr, uint32_t val,
3445 MemTxAttrs attrs,
3446 MemTxResult *result,
3447 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003448{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003449 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003450 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003451 hwaddr l = 2;
3452 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003453 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003454 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003455
Paolo Bonzini41063e12015-03-18 14:21:43 +01003456 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003457 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003458 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003459 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003460
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003461#if defined(TARGET_WORDS_BIGENDIAN)
3462 if (endian == DEVICE_LITTLE_ENDIAN) {
3463 val = bswap16(val);
3464 }
3465#else
3466 if (endian == DEVICE_BIG_ENDIAN) {
3467 val = bswap16(val);
3468 }
3469#endif
Peter Maydell50013112015-04-26 16:49:24 +01003470 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003471 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003472 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003473 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003474 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003475 switch (endian) {
3476 case DEVICE_LITTLE_ENDIAN:
3477 stw_le_p(ptr, val);
3478 break;
3479 case DEVICE_BIG_ENDIAN:
3480 stw_be_p(ptr, val);
3481 break;
3482 default:
3483 stw_p(ptr, val);
3484 break;
3485 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003486 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003487 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003488 }
Peter Maydell50013112015-04-26 16:49:24 +01003489 if (result) {
3490 *result = r;
3491 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003492 if (release_lock) {
3493 qemu_mutex_unlock_iothread();
3494 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003495 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003496}
3497
3498void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3499 MemTxAttrs attrs, MemTxResult *result)
3500{
3501 address_space_stw_internal(as, addr, val, attrs, result,
3502 DEVICE_NATIVE_ENDIAN);
3503}
3504
3505void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3506 MemTxAttrs attrs, MemTxResult *result)
3507{
3508 address_space_stw_internal(as, addr, val, attrs, result,
3509 DEVICE_LITTLE_ENDIAN);
3510}
3511
3512void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3513 MemTxAttrs attrs, MemTxResult *result)
3514{
3515 address_space_stw_internal(as, addr, val, attrs, result,
3516 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003517}
3518
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003519void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003520{
Peter Maydell50013112015-04-26 16:49:24 +01003521 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003522}
3523
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003524void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003525{
Peter Maydell50013112015-04-26 16:49:24 +01003526 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003527}
3528
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003529void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003530{
Peter Maydell50013112015-04-26 16:49:24 +01003531 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003532}
3533
bellardaab33092005-10-30 20:48:42 +00003534/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003535void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3536 MemTxAttrs attrs, MemTxResult *result)
3537{
3538 MemTxResult r;
3539 val = tswap64(val);
3540 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3541 if (result) {
3542 *result = r;
3543 }
3544}
3545
3546void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3547 MemTxAttrs attrs, MemTxResult *result)
3548{
3549 MemTxResult r;
3550 val = cpu_to_le64(val);
3551 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3552 if (result) {
3553 *result = r;
3554 }
3555}

Peter Maydell50013112015-04-26 16:49:24 +0100 3556void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3557 MemTxAttrs attrs, MemTxResult *result)
3558{
3559 MemTxResult r;
3560 val = cpu_to_be64(val);
3561 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3562 if (result) {
3563 *result = r;
3564 }
3565}
3566
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003567void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003568{
Peter Maydell50013112015-04-26 16:49:24 +01003569 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003570}
3571
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003572void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003573{
Peter Maydell50013112015-04-26 16:49:24 +01003574 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003575}
3576
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003577void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003578{
Peter Maydell50013112015-04-26 16:49:24 +01003579 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003580}
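
/*
 * Illustrative sketch (editorial addition; the helper is hypothetical):
 * publishing a 64-bit guest-physical pointer, e.g. a ring-buffer base
 * address, in little-endian byte order and reporting whether the bus
 * transaction succeeded.  As the XXX above notes, these 64-bit helpers
 * currently bounce through address_space_rw() internally.
 */
static bool publish_ring_base_le(AddressSpace *as, hwaddr slot, uint64_t base)
{
    MemTxResult r;

    address_space_stq_le(as, slot, base, MEMTXATTRS_UNSPECIFIED, &r);
    return r == MEMTX_OK;
}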
3581
aliguori5e2972f2009-03-28 17:51:36 +00003582/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003583int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003584 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003585{
3586 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003587 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003588 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003589
3590 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003591 int asidx;
3592 MemTxAttrs attrs;
3593
bellard13eb76e2004-01-24 15:23:36 +00003594 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003595 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3596 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003597 /* if no physical page mapped, return an error */
3598 if (phys_addr == -1)
3599 return -1;
3600 l = (page + TARGET_PAGE_SIZE) - addr;
3601 if (l > len)
3602 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003603 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003604 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003605 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3606 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003607 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003608 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3609 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003610 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003611 }
bellard13eb76e2004-01-24 15:23:36 +00003612 len -= l;
3613 buf += l;
3614 addr += l;
3615 }
3616 return 0;
3617}
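
/*
 * Illustrative sketch (editorial addition, loosely modelled on what a
 * debugger stub does; the function name is hypothetical): reading guest
 * memory through the debug accessor, which takes a *virtual* address and
 * fails cleanly if any page in the range is unmapped.
 */
static int debug_read_guest(CPUState *cpu, target_ulong vaddr,
                            uint8_t *buf, int len)
{
    /* is_write == 0: copy guest memory into buf */
    if (cpu_memory_rw_debug(cpu, vaddr, buf, len, 0) < 0) {
        return -1;  /* some page in [vaddr, vaddr + len) was not mapped */
    }
    return 0;
}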
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003618
3619/*
 3620 * Allows code that needs to deal with migration bitmaps etc. to still be
 3621 * built target-independent.
3622 */
3623size_t qemu_target_page_bits(void)
3624{
3625 return TARGET_PAGE_BITS;
3626}
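
/*
 * Illustrative sketch (editorial addition; the helper is hypothetical):
 * target-independent code such as the migration bitmap handling mentioned
 * above can size a one-bit-per-page bitmap without including any
 * target-specific headers.
 */
static uint64_t example_bitmap_bits(uint64_t ram_bytes)
{
    uint64_t page_size = 1ULL << qemu_target_page_bits();

    return (ram_bytes + page_size - 1) / page_size;
}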
3627
Paul Brooka68fe892010-03-01 00:08:59 +00003628#endif
bellard13eb76e2004-01-24 15:23:36 +00003629
Blue Swirl8e4a4242013-01-06 18:30:17 +00003630/*
 3631 * A helper function for the _utterly broken_ virtio device model to find out
 3632 * whether it's running on a big-endian machine. Don't do this at home, kids!
3633 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003634bool target_words_bigendian(void);
3635bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003636{
3637#if defined(TARGET_WORDS_BIGENDIAN)
3638 return true;
3639#else
3640 return false;
3641#endif
3642}
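
/*
 * Illustrative sketch (editorial addition; the enum and helper are
 * hypothetical, merely mirroring how a device model might consume
 * target_words_bigendian()): pick a default device byte order from the
 * target's byte order.
 */
typedef enum {
    EXAMPLE_ENDIAN_LITTLE,
    EXAMPLE_ENDIAN_BIG,
} ExampleEndian;

static ExampleEndian example_default_endian(void)
{
    return target_words_bigendian() ? EXAMPLE_ENDIAN_BIG
                                    : EXAMPLE_ENDIAN_LITTLE;
}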
3643
Wen Congyang76f35532012-05-07 12:04:18 +08003644#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003645bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003646{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003647 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003648 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003649 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003650
Paolo Bonzini41063e12015-03-18 14:21:43 +01003651 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003652 mr = address_space_translate(&address_space_memory,
3653 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003654
Paolo Bonzini41063e12015-03-18 14:21:43 +01003655 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3656 rcu_read_unlock();
3657 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003658}
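
/*
 * Illustrative sketch (editorial addition; the walker and its callback
 * are hypothetical): a crash-dump style walker can use the predicate
 * above to touch only pages that are backed by RAM or a ROM device,
 * skipping MMIO.
 */
static void example_dump_ram_pages(hwaddr start, hwaddr end, hwaddr page_size,
                                   void (*dump_page)(hwaddr addr))
{
    hwaddr addr;

    for (addr = start; addr < end; addr += page_size) {
        if (!cpu_physical_memory_is_io(addr)) {
            dump_page(addr);
        }
    }
}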
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003659
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003660int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003661{
3662 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003663 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003664
Mike Day0dc3f442013-09-05 14:41:35 -04003665 rcu_read_lock();
3666 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003667 ret = func(block->idstr, block->host, block->offset,
3668 block->used_length, opaque);
3669 if (ret) {
3670 break;
3671 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003672 }
Mike Day0dc3f442013-09-05 14:41:35 -04003673 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003674 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003675}
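
/*
 * Illustrative sketch (editorial addition; the callback is hypothetical
 * and its parameter types simply mirror the call made in the loop above):
 * summing the size of every RAMBlock.  Returning 0 keeps the iteration
 * going; a non-zero return stops it and is passed back to the caller.
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_count_bytes, &total);
 */
static int example_count_bytes(const char *block_name, void *host_addr,
                               ram_addr_t offset, ram_addr_t length,
                               void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;
}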
Peter Maydellec3f8c92013-06-27 20:53:38 +01003676#endif