/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
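/* For illustration (assuming a target with 4 KiB pages, i.e.
 * TARGET_PAGE_BITS == 12): P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, so the
 * physical map is a six-level radix tree in which each node decodes
 * P_L2_BITS == 9 bits of the page frame number and a leaf entry indexes
 * phys_sections.
 */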

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
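/* Illustration (again assuming 4 KiB pages): SUBPAGE_IDX(0x1234) == 0x234.
 * sub_section[] holds one section index per byte offset inside the page, so
 * a single guest page can be carved up among several memory regions.
 */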

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
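/* Sketch of the effect: a chain of nodes that each have exactly one valid
 * child collapses into the topmost entry, whose skip field then counts the
 * levels jumped over (e.g. absorbing a child with skip == 1 into a parent
 * with skip == 1 yields skip == 2), letting phys_page_find() below descend
 * several levels in one step.
 */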

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
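/* Example walk (illustrative, 4 KiB pages): for addr == 0x12345678 the page
 * index is 0x12345; each iteration above uses the top 9 still-unconsumed
 * bits of that index to select a slot in the current node, until a leaf
 * entry (skip == 0) yields the phys_sections index for the page.
 */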

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
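/* Note on the loop above: each pass resolves one IOMMU layer, rewriting addr
 * through the returned IOTLB entry and clamping *plen to the translated
 * IOMMU page, then restarts the lookup in iotlb.target_as until a terminal
 * (non-IOMMU) memory region is reached.
 */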

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
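/* The subsections above are only put on the wire when their .needed callback
 * returns true, which keeps migration streams compatible with destinations
 * that predate the exception_index and crash_occurred fields.
 */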

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
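/* Example of the wrap-around hazard handled above: a watchpoint on the last
 * page of a 64-bit address space, say vaddr == 0xfffffffffffff000 with
 * len == 0x1000, has vaddr + len == 0, so a comparison of exclusive range
 * ends could never match; the inclusive ends (wpend == UINT64_MAX) do.
 */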

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
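/* Usage sketch: a caller such as the migration code passes its dirty-memory
 * client (e.g. DIRTY_MEMORY_MIGRATION) to atomically harvest and clear the
 * dirty bits for a byte range; under TCG the TLBs are then reset so that the
 * next guest write to those pages marks them dirty again.
 */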

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
1160
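/*
 * Editor's note (illustrative example, not in the original source): with
 * 4 KiB target pages, mem_add on a section covering [0x1234, 0x3400)
 * splits it as:
 *
 *     register_subpage   [0x1234, 0x2000)   unaligned head
 *     register_multipage [0x2000, 0x3000)   whole pages
 *     register_subpage   [0x3000, 0x3400)   partial tail
 *
 * so only the page-unaligned fragments pay the cost of subpage dispatch;
 * fully page-aligned runs go through the fast multipage path.
 */
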
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    return fs.f_bsize;
}

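/*
 * Editor's note: HUGETLBFS_MAGIC is hugetlbfs's statfs(2) f_type value.
 * The helper above only uses fs.f_bsize; a stricter variant (hypothetical
 * sketch, not in this version) could warn when the path is not actually
 * on hugetlbfs:
 *
 *     if (fs.f_type != HUGETLBFS_MAGIC) {
 *         fprintf(stderr, "path not on hugetlbfs: %s\n", path);
 *     }
 */
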
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    struct stat st;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(memory_region_name(block->mr));
        for (c = sanitized_name; *c != '\0'; c++) {
            if (*c == '/') {
                *c = '_';
            }
        }

        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                   sanitized_name);
        g_free(sanitized_name);

        fd = mkstemp(filename);
        if (fd >= 0) {
            unlink(filename);
        }
        g_free(filename);
    } else {
        fd = open(path, O_RDWR | O_CREAT, 0644);
    }

    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        goto error;
    }

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    return NULL;
}
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

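/*
 * Editor's note (worked example with an assumed layout): with blocks at
 * [0x0, 0x10000000) and [0x20000000, 0x30000000), a request for
 * 0x8000000 bytes considers two gaps: [0x10000000, 0x20000000) of size
 * 0x10000000, and the unbounded tail after 0x30000000.  Both fit, but
 * the first is smaller, so mingap selects it and 0x10000000 is returned:
 * the allocator is best-fit over the gaps after each existing block.
 */
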
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the
     * core dump. */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

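/*
 * Editor's note: per the code above, the resulting idstr is the owning
 * device's qdev path, a '/', then the region name; blocks with no device
 * use the region name alone.  The duplicate scan makes idstr a unique key,
 * which is what qemu_ram_block_by_name() below relies on.
 */
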
/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

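/*
 * Editor's note: a minimal caller sketch (hypothetical), e.g. on incoming
 * migration when the source reports a different size for a resizeable
 * block:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_offset, remote_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * 'block_offset' is assumed to be the offset returned when the block was
 * created with RAM_RESIZEABLE, and 'remote_size' may not exceed its
 * max_length.
 */
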
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

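/*
 * Editor's note: the biggest-to-smallest ordering above biases the linear
 * scans in qemu_get_ram_block()/qemu_ram_block_from_host() toward the
 * largest blocks, which serve most translations.  E.g. inserting blocks
 * of 4G, 128M and 2M, in any order, always yields the list
 * 4G -> 128M -> 2M.
 */
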
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

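/*
 * Editor's note: typical use of the resizeable variant (a sketch, assuming
 * a caller along the lines of memory_region_init_resizeable_ram()):
 *
 *     Error *err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc_resizeable(used_size, max_size,
 *                                                 my_resized_cb, mr, &err);
 *
 * where 'my_resized_cb' is a hypothetical callback that qemu_ram_resize()
 * invokes after used_length changes.
 */
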
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

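/*
 * Editor's note: the block is unlinked under the ramlist mutex, but its
 * storage is only reclaimed by reclaim_ramblock() after an RCU grace
 * period.  Readers that found the block under rcu_read_lock() before the
 * QLIST_REMOVE_RCU can therefore keep dereferencing it safely until they
 * leave their critical section.
 */
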
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead.  For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    RAMBlock *block;
    ram_addr_t offset_inside_block;

    if (*size == 0) {
        return NULL;
    }

    block = qemu_get_ram_block(addr);
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}

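/*
 * Editor's note: caller sketch (shape assumed; cf. address_space_map()
 * later in this file):
 *
 *     hwaddr len = todo;
 *     rcu_read_lock();
 *     void *p = qemu_ram_ptr_length(raddr, &len);
 *     // on return, len may have been clamped to the end of the RAMBlock
 */
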
/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting
 * the pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}

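/*
 * Editor's note: usage sketch (hypothetical caller, e.g. migration code
 * that needs a (block, offset) pair for a host address):
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host, true, &ram_addr,
 *                                             &offset);
 *     if (rb) {
 *         // rb->idstr names the block; offset is page-aligned here
 *         // because round_offset was true
 *     }
 */
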
/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

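/*
 * Editor's note: SUBPAGE_IDX() is defined earlier in this file; judging
 * from the surrounding code it indexes sub_section[] at byte granularity
 * within the page (an assumption, since the macro is not visible in this
 * excerpt).  Under that reading, registering [0x100, 0x1ff] stores the
 * section id into 256 consecutive sub_section slots.
 */
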
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

Avi Kivity1d711482012-10-02 18:54:45 +02002289static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002290{
Peter Maydell32857f42015-10-01 15:29:50 +01002291 CPUAddressSpace *cpuas;
2292 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002293
2294    /* Since each CPU stores RAM addresses in its TLB cache, we must
2295       reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002296 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2297 cpu_reloading_memory_map();
2298 /* The CPU and TLB are protected by the iothread lock.
2299 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2300 * may have split the RCU critical section.
2301 */
2302 d = atomic_rcu_read(&cpuas->as->dispatch);
2303 cpuas->memory_dispatch = d;
2304 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002305}
2306
Avi Kivityac1970f2012-10-03 16:22:53 +02002307void address_space_init_dispatch(AddressSpace *as)
2308{
Paolo Bonzini00752702013-05-29 12:13:54 +02002309 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002310 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002311 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002312 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002313 .region_add = mem_add,
2314 .region_nop = mem_add,
2315 .priority = 0,
2316 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002317 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002318}
2319
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002320void address_space_unregister(AddressSpace *as)
2321{
2322 memory_listener_unregister(&as->dispatch_listener);
2323}
2324
Avi Kivity83f3c252012-10-07 12:59:55 +02002325void address_space_destroy_dispatch(AddressSpace *as)
2326{
2327 AddressSpaceDispatch *d = as->dispatch;
2328
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002329 atomic_rcu_set(&as->dispatch, NULL);
2330 if (d) {
2331 call_rcu(d, address_space_dispatch_free, rcu);
2332 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002333}
2334
Avi Kivity62152b82011-07-26 14:26:14 +03002335static void memory_map_init(void)
2336{
Anthony Liguori7267c092011-08-20 22:09:37 -05002337 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002338
Paolo Bonzini57271d62013-11-07 17:14:37 +01002339 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002340 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002341
Anthony Liguori7267c092011-08-20 22:09:37 -05002342 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002343 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2344 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002345 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002346}
2347
2348MemoryRegion *get_system_memory(void)
2349{
2350 return system_memory;
2351}
2352
Avi Kivity309cb472011-08-08 16:09:03 +03002353MemoryRegion *get_system_io(void)
2354{
2355 return system_io;
2356}
2357
pbrooke2eef172008-06-08 01:09:01 +00002358#endif /* !defined(CONFIG_USER_ONLY) */
2359
bellard13eb76e2004-01-24 15:23:36 +00002360/* physical memory access (slow version, mainly for debug) */
2361#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002362int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002363 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002364{
2365 int l, flags;
2366 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002367    void *p;
bellard13eb76e2004-01-24 15:23:36 +00002368
2369 while (len > 0) {
2370 page = addr & TARGET_PAGE_MASK;
2371 l = (page + TARGET_PAGE_SIZE) - addr;
2372 if (l > len)
2373 l = len;
2374 flags = page_get_flags(page);
2375 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002376 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002377 if (is_write) {
2378 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002379 return -1;
bellard579a97f2007-11-11 14:26:47 +00002380 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002381 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002382 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002383 memcpy(p, buf, l);
2384 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002385 } else {
2386 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002387 return -1;
bellard579a97f2007-11-11 14:26:47 +00002388 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002389 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002390 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002391 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002392 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002393 }
2394 len -= l;
2395 buf += l;
2396 addr += l;
2397 }
Paul Brooka68fe892010-03-01 00:08:59 +00002398 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002399}
bellard8df1cd02005-01-28 22:37:22 +00002400
bellard13eb76e2004-01-24 15:23:36 +00002401#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002402
Paolo Bonzini845b6212015-03-23 11:45:53 +01002403static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002404 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002405{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002406 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2407 /* No early return if dirty_log_mask is or becomes 0, because
2408 * cpu_physical_memory_set_dirty_range will still call
2409 * xen_modified_memory.
2410 */
2411 if (dirty_log_mask) {
2412 dirty_log_mask =
2413 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002414 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002415 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2416 tb_invalidate_phys_range(addr, addr + length);
2417 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2418 }
2419 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002420}
2421
Richard Henderson23326162013-07-08 14:55:59 -07002422static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002423{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002424 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002425
2426 /* Regions are assumed to support 1-4 byte accesses unless
2427 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002428 if (access_size_max == 0) {
2429 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002430 }
Richard Henderson23326162013-07-08 14:55:59 -07002431
2432 /* Bound the maximum access by the alignment of the address. */
2433 if (!mr->ops->impl.unaligned) {
2434 unsigned align_size_max = addr & -addr;
2435 if (align_size_max != 0 && align_size_max < access_size_max) {
2436 access_size_max = align_size_max;
2437 }
2438 }
2439
2440 /* Don't attempt accesses larger than the maximum. */
2441 if (l > access_size_max) {
2442 l = access_size_max;
2443 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002444 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002445
2446 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002447}
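/*
 * Worked example (illustrative, not part of the original source): consider
 * an 8-byte access at address 0x1002 to a region whose ops leave
 * valid.max_access_size at 0 and impl.unaligned clear:
 *
 *   access_size_max = 4                 (the 1-4 byte default above)
 *   addr & -addr    = 0x2               (lowest set bit of the address)
 *   access_size_max = 2                 (bounded by the alignment)
 *   l = MIN(8, 2) = 2; pow2floor(2) = 2
 *
 * The caller's loop, e.g. in address_space_rw(), then issues the transfer
 * as 2-byte pieces.
 */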
2448
Jan Kiszka4840f102015-06-18 18:47:22 +02002449static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002450{
Jan Kiszka4840f102015-06-18 18:47:22 +02002451 bool unlocked = !qemu_mutex_iothread_locked();
2452 bool release_lock = false;
2453
2454 if (unlocked && mr->global_locking) {
2455 qemu_mutex_lock_iothread();
2456 unlocked = false;
2457 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002458 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002459 if (mr->flush_coalesced_mmio) {
2460 if (unlocked) {
2461 qemu_mutex_lock_iothread();
2462 }
2463 qemu_flush_coalesced_mmio_buffer();
2464 if (unlocked) {
2465 qemu_mutex_unlock_iothread();
2466 }
2467 }
2468
2469 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002470}
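/*
 * Sketch of the caller contract (illustrative): prepare_mmio_access() may
 * take the iothread lock and reports through its return value whether the
 * caller must drop it once the access is done:
 *
 *   bool release_lock = false;
 *   if (!memory_access_is_direct(mr, is_write)) {
 *       release_lock |= prepare_mmio_access(mr);
 *       (dispatch the access to the device)
 *   }
 *   if (release_lock) {
 *       qemu_mutex_unlock_iothread();
 *   }
 */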
2471
Peter Maydell5c9eb022015-04-26 16:49:24 +01002472MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2473 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002474{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002475 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002476 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002477 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002478 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002479 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002480 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002481 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002482
Paolo Bonzini41063e12015-03-18 14:21:43 +01002483 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002484 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002485 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002486 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002487
bellard13eb76e2004-01-24 15:23:36 +00002488 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002489 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002490 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002491 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002492 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002493 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002494 switch (l) {
2495 case 8:
2496 /* 64 bit write access */
2497 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002498 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2499 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002500 break;
2501 case 4:
bellard1c213d12005-09-03 10:49:04 +00002502 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002503 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002504 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2505 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002506 break;
2507 case 2:
bellard1c213d12005-09-03 10:49:04 +00002508 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002509 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002510 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2511 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002512 break;
2513 case 1:
bellard1c213d12005-09-03 10:49:04 +00002514 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002515 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002516 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2517 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002518 break;
2519 default:
2520 abort();
bellard13eb76e2004-01-24 15:23:36 +00002521 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002522 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002523 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002524 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002525 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002526 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002527 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002528 }
2529 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002530 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002531 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002532 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002533 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002534 switch (l) {
2535 case 8:
2536 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002537 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2538 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002539 stq_p(buf, val);
2540 break;
2541 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002542 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002543 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2544 attrs);
bellardc27004e2005-01-03 23:35:10 +00002545 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002546 break;
2547 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002548 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002549 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2550 attrs);
bellardc27004e2005-01-03 23:35:10 +00002551 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002552 break;
2553 case 1:
bellard1c213d12005-09-03 10:49:04 +00002554 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002555 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2556 attrs);
bellardc27004e2005-01-03 23:35:10 +00002557 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002558 break;
2559 default:
2560 abort();
bellard13eb76e2004-01-24 15:23:36 +00002561 }
2562 } else {
2563 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002564 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002565 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002566 }
2567 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002568
2569 if (release_lock) {
2570 qemu_mutex_unlock_iothread();
2571 release_lock = false;
2572 }
2573
bellard13eb76e2004-01-24 15:23:36 +00002574 len -= l;
2575 buf += l;
2576 addr += l;
2577 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002578 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002579
Peter Maydell3b643492015-04-26 16:49:23 +01002580 return result;
bellard13eb76e2004-01-24 15:23:36 +00002581}
bellard8df1cd02005-01-28 22:37:22 +00002582
Peter Maydell5c9eb022015-04-26 16:49:24 +01002583MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2584 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002585{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002586 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002587}
2588
Peter Maydell5c9eb022015-04-26 16:49:24 +01002589MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2590 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002591{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002592 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002593}
2594
2595
Avi Kivitya8170e52012-10-23 12:30:10 +02002596void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002597 int len, int is_write)
2598{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002599 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2600 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002601}
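/*
 * Example (illustrative): a device model storing a completion status into
 * guest memory through the global address space, checking for I/O errors.
 * The guest physical address 0x1000 is an arbitrary placeholder.
 *
 *   uint8_t status[4] = { 0xde, 0xad, 0xbe, 0xef };
 *   MemTxResult r = address_space_write(&address_space_memory, 0x1000,
 *                                       MEMTXATTRS_UNSPECIFIED,
 *                                       status, sizeof(status));
 *   if (r != MEMTX_OK) {
 *       (part of the range hit an MMIO region that failed the access)
 *   }
 */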
2602
Alexander Graf582b55a2013-12-11 14:17:44 +01002603enum write_rom_type {
2604 WRITE_DATA,
2605 FLUSH_CACHE,
2606};
2607
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002608static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002609 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002610{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002611 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002612 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002613 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002614 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002615
Paolo Bonzini41063e12015-03-18 14:21:43 +01002616 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002617 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002618 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002619 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002620
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002621 if (!(memory_region_is_ram(mr) ||
2622 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002623 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002624 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002625 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002626 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002627 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002628 switch (type) {
2629 case WRITE_DATA:
2630 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002631 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002632 break;
2633 case FLUSH_CACHE:
2634 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2635 break;
2636 }
bellardd0ecd2a2006-04-23 17:14:48 +00002637 }
2638 len -= l;
2639 buf += l;
2640 addr += l;
2641 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002642 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002643}
2644
Alexander Graf582b55a2013-12-11 14:17:44 +01002645/* Used for ROM loading: can write in RAM and ROM. */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002646void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002647 const uint8_t *buf, int len)
2648{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002649 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002650}
2651
2652void cpu_flush_icache_range(hwaddr start, int len)
2653{
2654 /*
2655 * This function should do the same thing as an icache flush that was
2656 * triggered from within the guest. For TCG we are always cache coherent,
2657 * so there is no need to flush anything. For KVM / Xen we need to flush
2658 * the host's instruction cache at least.
2659 */
2660 if (tcg_enabled()) {
2661 return;
2662 }
2663
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002664 cpu_physical_memory_write_rom_internal(&address_space_memory,
2665 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002666}
2667
aliguori6d16c2f2009-01-22 16:59:11 +00002668typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002669 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002670 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002671 hwaddr addr;
2672 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002673 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002674} BounceBuffer;
2675
2676static BounceBuffer bounce;
2677
aliguoriba223c22009-01-22 16:59:16 +00002678typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002679 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002680 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002681} MapClient;
2682
Fam Zheng38e047b2015-03-16 17:03:35 +08002683QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002684static QLIST_HEAD(map_client_list, MapClient) map_client_list
2685 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002686
Fam Zhenge95205e2015-03-16 17:03:37 +08002687static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002688{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002689 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002690 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002691}
2692
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002693static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002694{
2695 MapClient *client;
2696
Blue Swirl72cf2d42009-09-12 07:36:22 +00002697 while (!QLIST_EMPTY(&map_client_list)) {
2698 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002699 qemu_bh_schedule(client->bh);
2700 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002701 }
2702}
2703
Fam Zhenge95205e2015-03-16 17:03:37 +08002704void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002705{
2706 MapClient *client = g_malloc(sizeof(*client));
2707
Fam Zheng38e047b2015-03-16 17:03:35 +08002708 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002709 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002710 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002711 if (!atomic_read(&bounce.in_use)) {
2712 cpu_notify_map_clients_locked();
2713 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002714 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002715}
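/*
 * Example (illustrative sketch): a device whose address_space_map() call
 * failed because the bounce buffer was busy registers a bottom half to
 * retry later.  'my_dma_retry' and 'MyDevice' are hypothetical names.
 *
 *   static void my_dma_retry(void *opaque)
 *   {
 *       MyDevice *dev = opaque;
 *       (call address_space_map() again and continue the transfer)
 *   }
 *
 *   dev->bh = qemu_bh_new(my_dma_retry, dev);
 *   cpu_register_map_client(dev->bh);
 *
 * The bottom half is scheduled, and the client unregistered, as soon as
 * map resources may be available again.
 */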
2716
Fam Zheng38e047b2015-03-16 17:03:35 +08002717void cpu_exec_init_all(void)
2718{
2719 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002720 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002721 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002722 qemu_mutex_init(&map_client_list_lock);
2723}
2724
Fam Zhenge95205e2015-03-16 17:03:37 +08002725void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002726{
Fam Zhenge95205e2015-03-16 17:03:37 +08002727 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002728
Fam Zhenge95205e2015-03-16 17:03:37 +08002729 qemu_mutex_lock(&map_client_list_lock);
2730 QLIST_FOREACH(client, &map_client_list, link) {
2731 if (client->bh == bh) {
2732 cpu_unregister_map_client_do(client);
2733 break;
2734 }
2735 }
2736 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002737}
2738
2739static void cpu_notify_map_clients(void)
2740{
Fam Zheng38e047b2015-03-16 17:03:35 +08002741 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002742 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002743 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002744}
2745
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002746bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2747{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002748 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002749 hwaddr l, xlat;
2750
Paolo Bonzini41063e12015-03-18 14:21:43 +01002751 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002752 while (len > 0) {
2753 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002754 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2755 if (!memory_access_is_direct(mr, is_write)) {
2756 l = memory_access_size(mr, l, addr);
2757 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002758 return false;
2759 }
2760 }
2761
2762 len -= l;
2763 addr += l;
2764 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002765 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002766 return true;
2767}
2768
aliguori6d16c2f2009-01-22 16:59:11 +00002769/* Map a physical memory region into a host virtual address.
2770 * May map a subset of the requested range, given by and returned in *plen.
2771 * May return NULL if resources needed to perform the mapping are exhausted.
2772 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002773 * Use cpu_register_map_client() to know when retrying the map operation is
2774 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002775 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002776void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002777 hwaddr addr,
2778 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002779 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002780{
Avi Kivitya8170e52012-10-23 12:30:10 +02002781 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002782 hwaddr done = 0;
2783 hwaddr l, xlat, base;
2784 MemoryRegion *mr, *this_mr;
2785 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002786 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002787
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002788 if (len == 0) {
2789 return NULL;
2790 }
aliguori6d16c2f2009-01-22 16:59:11 +00002791
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002792 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002793 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002794 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002795
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002796 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002797 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002798 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002799 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002800 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002801 /* Avoid unbounded allocations */
2802 l = MIN(l, TARGET_PAGE_SIZE);
2803 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002804 bounce.addr = addr;
2805 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002806
2807 memory_region_ref(mr);
2808 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002809 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002810 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2811 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002812 }
aliguori6d16c2f2009-01-22 16:59:11 +00002813
Paolo Bonzini41063e12015-03-18 14:21:43 +01002814 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002815 *plen = l;
2816 return bounce.buffer;
2817 }
2818
2819 base = xlat;
2820 raddr = memory_region_get_ram_addr(mr);
2821
2822 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002823 len -= l;
2824 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002825 done += l;
2826 if (len == 0) {
2827 break;
2828 }
2829
2830 l = len;
2831 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2832 if (this_mr != mr || xlat != base + done) {
2833 break;
2834 }
aliguori6d16c2f2009-01-22 16:59:11 +00002835 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002836
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002837 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002838 *plen = done;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002839 ptr = qemu_ram_ptr_length(raddr + base, plen);
2840 rcu_read_unlock();
2841
2842 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002843}
2844
Avi Kivityac1970f2012-10-03 16:22:53 +02002845/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002846 * Will also mark the memory as dirty if is_write == 1. access_len gives
2847 * the amount of memory that was actually read or written by the caller.
2848 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002849void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2850 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002851{
2852 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002853 MemoryRegion *mr;
2854 ram_addr_t addr1;
2855
2856 mr = qemu_ram_addr_from_host(buffer, &addr1);
2857 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002858 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002859 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002860 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002861 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002862 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002863 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002864 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002865 return;
2866 }
2867 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002868 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2869 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002870 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002871 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002872 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002873 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002874 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002875 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002876}
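/*
 * Example (illustrative): zero-copy access to guest memory.  *plen may
 * come back smaller than requested, so only plen bytes may be touched;
 * the bounce-buffer fallback is handled inside the API.
 *
 *   hwaddr plen = size;
 *   void *host = address_space_map(as, gpa, &plen, true);
 *   if (host) {
 *       memset(host, 0, plen);
 *       address_space_unmap(as, host, plen, true, plen);
 *   }
 */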
bellardd0ecd2a2006-04-23 17:14:48 +00002877
Avi Kivitya8170e52012-10-23 12:30:10 +02002878void *cpu_physical_memory_map(hwaddr addr,
2879 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002880 int is_write)
2881{
2882 return address_space_map(&address_space_memory, addr, plen, is_write);
2883}
2884
Avi Kivitya8170e52012-10-23 12:30:10 +02002885void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2886 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002887{
2888 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2889}
2890
bellard8df1cd02005-01-28 22:37:22 +00002891/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002892static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2893 MemTxAttrs attrs,
2894 MemTxResult *result,
2895 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002896{
bellard8df1cd02005-01-28 22:37:22 +00002897 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002898 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002899 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002900 hwaddr l = 4;
2901 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002902 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002903 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002904
Paolo Bonzini41063e12015-03-18 14:21:43 +01002905 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002906 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002907 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002908 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002909
bellard8df1cd02005-01-28 22:37:22 +00002910 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002911 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002912#if defined(TARGET_WORDS_BIGENDIAN)
2913 if (endian == DEVICE_LITTLE_ENDIAN) {
2914 val = bswap32(val);
2915 }
2916#else
2917 if (endian == DEVICE_BIG_ENDIAN) {
2918 val = bswap32(val);
2919 }
2920#endif
bellard8df1cd02005-01-28 22:37:22 +00002921 } else {
2922 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002923 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002924 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002925 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002926 switch (endian) {
2927 case DEVICE_LITTLE_ENDIAN:
2928 val = ldl_le_p(ptr);
2929 break;
2930 case DEVICE_BIG_ENDIAN:
2931 val = ldl_be_p(ptr);
2932 break;
2933 default:
2934 val = ldl_p(ptr);
2935 break;
2936 }
Peter Maydell50013112015-04-26 16:49:24 +01002937 r = MEMTX_OK;
2938 }
2939 if (result) {
2940 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002941 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002942 if (release_lock) {
2943 qemu_mutex_unlock_iothread();
2944 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002945 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002946 return val;
2947}
2948
Peter Maydell50013112015-04-26 16:49:24 +01002949uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2950 MemTxAttrs attrs, MemTxResult *result)
2951{
2952 return address_space_ldl_internal(as, addr, attrs, result,
2953 DEVICE_NATIVE_ENDIAN);
2954}
2955
2956uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2957 MemTxAttrs attrs, MemTxResult *result)
2958{
2959 return address_space_ldl_internal(as, addr, attrs, result,
2960 DEVICE_LITTLE_ENDIAN);
2961}
2962
2963uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2964 MemTxAttrs attrs, MemTxResult *result)
2965{
2966 return address_space_ldl_internal(as, addr, attrs, result,
2967 DEVICE_BIG_ENDIAN);
2968}
2969
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002970uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002971{
Peter Maydell50013112015-04-26 16:49:24 +01002972 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002973}
2974
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002975uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002976{
Peter Maydell50013112015-04-26 16:49:24 +01002977 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002978}
2979
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002980uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002981{
Peter Maydell50013112015-04-26 16:49:24 +01002982 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002983}
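/*
 * Example (illustrative): an explicitly checked load, as a device model
 * would use for a field that is little-endian regardless of the target:
 *
 *   MemTxResult r;
 *   uint32_t v = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, &r);
 *   if (r != MEMTX_OK) {
 *       (handle the failed transaction)
 *   }
 *
 * The *_phys() wrappers above pass NULL and discard the MemTxResult.
 */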
2984
bellard84b7b8e2005-11-28 21:19:04 +00002985/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002986static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2987 MemTxAttrs attrs,
2988 MemTxResult *result,
2989 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002990{
bellard84b7b8e2005-11-28 21:19:04 +00002991 uint8_t *ptr;
2992 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002993 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002994 hwaddr l = 8;
2995 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002996 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002997 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002998
Paolo Bonzini41063e12015-03-18 14:21:43 +01002999 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003000 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003001 false);
3002 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003003 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003004
bellard84b7b8e2005-11-28 21:19:04 +00003005 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003006 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003007#if defined(TARGET_WORDS_BIGENDIAN)
3008 if (endian == DEVICE_LITTLE_ENDIAN) {
3009 val = bswap64(val);
3010 }
3011#else
3012 if (endian == DEVICE_BIG_ENDIAN) {
3013 val = bswap64(val);
3014 }
3015#endif
bellard84b7b8e2005-11-28 21:19:04 +00003016 } else {
3017 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003018 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003019 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003020 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003021 switch (endian) {
3022 case DEVICE_LITTLE_ENDIAN:
3023 val = ldq_le_p(ptr);
3024 break;
3025 case DEVICE_BIG_ENDIAN:
3026 val = ldq_be_p(ptr);
3027 break;
3028 default:
3029 val = ldq_p(ptr);
3030 break;
3031 }
Peter Maydell50013112015-04-26 16:49:24 +01003032 r = MEMTX_OK;
3033 }
3034 if (result) {
3035 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003036 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003037 if (release_lock) {
3038 qemu_mutex_unlock_iothread();
3039 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003040 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003041 return val;
3042}
3043
Peter Maydell50013112015-04-26 16:49:24 +01003044uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3045 MemTxAttrs attrs, MemTxResult *result)
3046{
3047 return address_space_ldq_internal(as, addr, attrs, result,
3048 DEVICE_NATIVE_ENDIAN);
3049}
3050
3051uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3052 MemTxAttrs attrs, MemTxResult *result)
3053{
3054 return address_space_ldq_internal(as, addr, attrs, result,
3055 DEVICE_LITTLE_ENDIAN);
3056}
3057
3058uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3059 MemTxAttrs attrs, MemTxResult *result)
3060{
3061 return address_space_ldq_internal(as, addr, attrs, result,
3062 DEVICE_BIG_ENDIAN);
3063}
3064
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003065uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003066{
Peter Maydell50013112015-04-26 16:49:24 +01003067 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003068}
3069
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003070uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003071{
Peter Maydell50013112015-04-26 16:49:24 +01003072 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003073}
3074
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003075uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003076{
Peter Maydell50013112015-04-26 16:49:24 +01003077 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003078}
3079
bellardaab33092005-10-30 20:48:42 +00003080/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003081uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3082 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003083{
3084 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003085 MemTxResult r;
3086
3087 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3088 if (result) {
3089 *result = r;
3090 }
bellardaab33092005-10-30 20:48:42 +00003091 return val;
3092}
3093
Peter Maydell50013112015-04-26 16:49:24 +01003094uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3095{
3096 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3097}
3098
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003099/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003100static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3101 hwaddr addr,
3102 MemTxAttrs attrs,
3103 MemTxResult *result,
3104 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003105{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003106 uint8_t *ptr;
3107 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003108 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003109 hwaddr l = 2;
3110 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003111 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003112 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003113
Paolo Bonzini41063e12015-03-18 14:21:43 +01003114 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003115 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003116 false);
3117 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003118 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003119
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003120 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003121 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003122#if defined(TARGET_WORDS_BIGENDIAN)
3123 if (endian == DEVICE_LITTLE_ENDIAN) {
3124 val = bswap16(val);
3125 }
3126#else
3127 if (endian == DEVICE_BIG_ENDIAN) {
3128 val = bswap16(val);
3129 }
3130#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003131 } else {
3132 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003133 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003134 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003135 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003136 switch (endian) {
3137 case DEVICE_LITTLE_ENDIAN:
3138 val = lduw_le_p(ptr);
3139 break;
3140 case DEVICE_BIG_ENDIAN:
3141 val = lduw_be_p(ptr);
3142 break;
3143 default:
3144 val = lduw_p(ptr);
3145 break;
3146 }
Peter Maydell50013112015-04-26 16:49:24 +01003147 r = MEMTX_OK;
3148 }
3149 if (result) {
3150 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003151 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003152 if (release_lock) {
3153 qemu_mutex_unlock_iothread();
3154 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003155 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003156 return val;
bellardaab33092005-10-30 20:48:42 +00003157}
3158
Peter Maydell50013112015-04-26 16:49:24 +01003159uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3160 MemTxAttrs attrs, MemTxResult *result)
3161{
3162 return address_space_lduw_internal(as, addr, attrs, result,
3163 DEVICE_NATIVE_ENDIAN);
3164}
3165
3166uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3167 MemTxAttrs attrs, MemTxResult *result)
3168{
3169 return address_space_lduw_internal(as, addr, attrs, result,
3170 DEVICE_LITTLE_ENDIAN);
3171}
3172
3173uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3174 MemTxAttrs attrs, MemTxResult *result)
3175{
3176 return address_space_lduw_internal(as, addr, attrs, result,
3177 DEVICE_BIG_ENDIAN);
3178}
3179
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003180uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003181{
Peter Maydell50013112015-04-26 16:49:24 +01003182 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003183}
3184
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003185uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003186{
Peter Maydell50013112015-04-26 16:49:24 +01003187 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003188}
3189
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003190uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003191{
Peter Maydell50013112015-04-26 16:49:24 +01003192 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003193}
3194
bellard8df1cd02005-01-28 22:37:22 +00003195/* warning: addr must be aligned. The ram page is not marked as dirty
3196   and the code inside is not invalidated. This is useful if the dirty
3197   bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003198void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3199 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003200{
bellard8df1cd02005-01-28 22:37:22 +00003201 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003202 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003203 hwaddr l = 4;
3204 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003205 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003206 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003207 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003208
Paolo Bonzini41063e12015-03-18 14:21:43 +01003209 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003210 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003211 true);
3212 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003213 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003214
Peter Maydell50013112015-04-26 16:49:24 +01003215 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003216 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003217 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003218 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003219 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003220
Paolo Bonzini845b6212015-03-23 11:45:53 +01003221 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3222 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003223 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003224 r = MEMTX_OK;
3225 }
3226 if (result) {
3227 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003228 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003229 if (release_lock) {
3230 qemu_mutex_unlock_iothread();
3231 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003232 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003233}
3234
Peter Maydell50013112015-04-26 16:49:24 +01003235void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3236{
3237 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3238}
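/*
 * Example (illustrative): a softmmu page-table walker setting a PTE's
 * accessed bit.  The notdirty variant skips the DIRTY_MEMORY_CODE
 * bookkeeping, so translated code on the page is not invalidated.
 * PTE_ACCESSED is a hypothetical flag used only for illustration.
 *
 *   pte = ldl_phys(cs->as, pte_addr);
 *   stl_phys_notdirty(cs->as, pte_addr, pte | PTE_ACCESSED);
 */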
3239
bellard8df1cd02005-01-28 22:37:22 +00003240/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003241static inline void address_space_stl_internal(AddressSpace *as,
3242 hwaddr addr, uint32_t val,
3243 MemTxAttrs attrs,
3244 MemTxResult *result,
3245 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003246{
bellard8df1cd02005-01-28 22:37:22 +00003247 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003248 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003249 hwaddr l = 4;
3250 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003251 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003252 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003253
Paolo Bonzini41063e12015-03-18 14:21:43 +01003254 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003255 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003256 true);
3257 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003258 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003259
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003260#if defined(TARGET_WORDS_BIGENDIAN)
3261 if (endian == DEVICE_LITTLE_ENDIAN) {
3262 val = bswap32(val);
3263 }
3264#else
3265 if (endian == DEVICE_BIG_ENDIAN) {
3266 val = bswap32(val);
3267 }
3268#endif
Peter Maydell50013112015-04-26 16:49:24 +01003269 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003270 } else {
bellard8df1cd02005-01-28 22:37:22 +00003271 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003272 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003273 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003274 switch (endian) {
3275 case DEVICE_LITTLE_ENDIAN:
3276 stl_le_p(ptr, val);
3277 break;
3278 case DEVICE_BIG_ENDIAN:
3279 stl_be_p(ptr, val);
3280 break;
3281 default:
3282 stl_p(ptr, val);
3283 break;
3284 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003285 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003286 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003287 }
Peter Maydell50013112015-04-26 16:49:24 +01003288 if (result) {
3289 *result = r;
3290 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003291 if (release_lock) {
3292 qemu_mutex_unlock_iothread();
3293 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003294 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003295}
3296
3297void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3298 MemTxAttrs attrs, MemTxResult *result)
3299{
3300 address_space_stl_internal(as, addr, val, attrs, result,
3301 DEVICE_NATIVE_ENDIAN);
3302}
3303
3304void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3305 MemTxAttrs attrs, MemTxResult *result)
3306{
3307 address_space_stl_internal(as, addr, val, attrs, result,
3308 DEVICE_LITTLE_ENDIAN);
3309}
3310
3311void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3312 MemTxAttrs attrs, MemTxResult *result)
3313{
3314 address_space_stl_internal(as, addr, val, attrs, result,
3315 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003316}
3317
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003318void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003319{
Peter Maydell50013112015-04-26 16:49:24 +01003320 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003321}
3322
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003323void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003324{
Peter Maydell50013112015-04-26 16:49:24 +01003325 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003326}
3327
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003328void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003329{
Peter Maydell50013112015-04-26 16:49:24 +01003330 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003331}
3332
bellardaab33092005-10-30 20:48:42 +00003333/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003334void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3335 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003336{
3337 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003338 MemTxResult r;
3339
3340 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3341 if (result) {
3342 *result = r;
3343 }
3344}
3345
3346void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3347{
3348 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003349}
3350
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003351/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003352static inline void address_space_stw_internal(AddressSpace *as,
3353 hwaddr addr, uint32_t val,
3354 MemTxAttrs attrs,
3355 MemTxResult *result,
3356 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003357{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003358 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003359 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003360 hwaddr l = 2;
3361 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003362 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003363 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003364
Paolo Bonzini41063e12015-03-18 14:21:43 +01003365 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003366 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003367 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003368 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003369
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003370#if defined(TARGET_WORDS_BIGENDIAN)
3371 if (endian == DEVICE_LITTLE_ENDIAN) {
3372 val = bswap16(val);
3373 }
3374#else
3375 if (endian == DEVICE_BIG_ENDIAN) {
3376 val = bswap16(val);
3377 }
3378#endif
Peter Maydell50013112015-04-26 16:49:24 +01003379 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003380 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003381 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003382 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003383 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003384 switch (endian) {
3385 case DEVICE_LITTLE_ENDIAN:
3386 stw_le_p(ptr, val);
3387 break;
3388 case DEVICE_BIG_ENDIAN:
3389 stw_be_p(ptr, val);
3390 break;
3391 default:
3392 stw_p(ptr, val);
3393 break;
3394 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003395 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003396 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003397 }
Peter Maydell50013112015-04-26 16:49:24 +01003398 if (result) {
3399 *result = r;
3400 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003401 if (release_lock) {
3402 qemu_mutex_unlock_iothread();
3403 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003404 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003405}
3406
3407void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3408 MemTxAttrs attrs, MemTxResult *result)
3409{
3410 address_space_stw_internal(as, addr, val, attrs, result,
3411 DEVICE_NATIVE_ENDIAN);
3412}
3413
3414void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3415 MemTxAttrs attrs, MemTxResult *result)
3416{
3417 address_space_stw_internal(as, addr, val, attrs, result,
3418 DEVICE_LITTLE_ENDIAN);
3419}
3420
3421void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3422 MemTxAttrs attrs, MemTxResult *result)
3423{
3424 address_space_stw_internal(as, addr, val, attrs, result,
3425 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003426}
3427
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003428void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003429{
Peter Maydell50013112015-04-26 16:49:24 +01003430 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003431}
3432
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003433void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003434{
Peter Maydell50013112015-04-26 16:49:24 +01003435 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003436}
3437
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003438void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003439{
Peter Maydell50013112015-04-26 16:49:24 +01003440 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003441}
3442
bellardaab33092005-10-30 20:48:42 +00003443/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003444void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3445 MemTxAttrs attrs, MemTxResult *result)
3446{
3447 MemTxResult r;
3448 val = tswap64(val);
3449 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3450 if (result) {
3451 *result = r;
3452 }
3453}
3454
3455void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3456 MemTxAttrs attrs, MemTxResult *result)
3457{
3458 MemTxResult r;
3459 val = cpu_to_le64(val);
3460 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3461 if (result) {
3462 *result = r;
3463 }
3464}

3465void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3466 MemTxAttrs attrs, MemTxResult *result)
3467{
3468 MemTxResult r;
3469 val = cpu_to_be64(val);
3470 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3471 if (result) {
3472 *result = r;
3473 }
3474}
3475
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003476void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003477{
Peter Maydell50013112015-04-26 16:49:24 +01003478 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003479}
3480
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003481void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003482{
Peter Maydell50013112015-04-26 16:49:24 +01003483 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003484}
3485
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003486void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003487{
Peter Maydell50013112015-04-26 16:49:24 +01003488 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003489}
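
/*
 * Illustrative sketch (hypothetical usage, hence not compiled): storing a
 * 64-bit value with explicit endianness, e.g. a DMA descriptor address for
 * a big-endian device.  The addresses are made up.
 */
#if 0
static void example_store_quad(AddressSpace *as)
{
    uint64_t desc_addr = 0x40000000ULL;

    /* The on-bus representation is big-endian regardless of the host. */
    stq_be_phys(as, 0x2000, desc_addr);
}
#endif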

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
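
/*
 * Illustrative sketch (hypothetical usage, hence not compiled): the shape of
 * a gdbstub-style caller.  'cpu' and 'vaddr' would come from the debugger;
 * -1 means some page in the range had no mapping.
 */
#if 0
static int example_read_guest_vaddr(CPUState *cpu, target_ulong vaddr,
                                    uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0 /* read */);
}
#endif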

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
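
/*
 * Illustrative sketch (hypothetical usage, hence not compiled): target-
 * independent code can derive the page size without target headers.
 */
#if 0
static size_t example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}
#endif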

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
/* The prototype lives here rather than in a header because callers declare
 * it themselves; repeating it keeps the missing-prototype warning quiet. */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
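
/*
 * Illustrative sketch (hypothetical usage, hence not compiled): a legacy
 * virtio caller declares the prototype itself and branches on the result.
 */
#if 0
bool target_words_bigendian(void);

static const char *example_device_byte_order(void)
{
    return target_words_bigendian() ? "big" : "little";
}
#endif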

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
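
/*
 * Illustrative sketch (hypothetical usage, hence not compiled): a crash-dump
 * style walker could use this to skip device regions when copying guest
 * memory; page-granularity handling is elided here.
 */
#if 0
static bool example_page_is_dumpable(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}
#endif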

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
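
/*
 * Illustrative sketch (hypothetical usage, hence not compiled): a
 * RAMBlockIterFunc that totals guest RAM, as a migration-style caller might.
 * Returning nonzero from the callback stops the walk early.
 */
#if 0
static int example_sum_ram(const char *block_name, void *host_addr,
                           ram_addr_t offset, ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0; /* keep iterating */
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_ram, &total);
    return total;
}
#endif
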
#endif