/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf.
     */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

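/* Make sure @map has room for at least @nodes more nodes, growing the
 * node array geometrically as needed.
 */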
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

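/* Allocate one node from @map and initialize all of its entries: leaf
 * entries point at the unassigned section, non-leaf entries at no
 * child.  Returns the index of the new node.
 */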
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

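/* Fill one level of the radix tree: entries that cover a whole aligned
 * chunk of the [*index, *index + *nb) range are pointed at @leaf
 * directly; partially covered entries recurse one level down.
 */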
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

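/* Point @nb pages of @d's physical page map, starting at page @index,
 * at the section numbered @leaf.
 */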
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

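/* Walk the radix tree for @addr, honouring the per-entry skip counts,
 * and return the section that maps it, or the unassigned section if
 * the address has no mapping.
 */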
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

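/* Return true if an access to @mr can be performed directly on host
 * memory: writes need writable RAM, reads can also hit ROM and
 * ROMD-mode devices.
 */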
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) && !mr->readonly;
    } else {
        return memory_region_is_ram(mr) || memory_region_is_romd(mr);
    }
}

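/* Translate @addr within @as into a MemoryRegion and an offset inside
 * it, walking through any IOMMUs on the path and clamping *plen so the
 * result does not cross an IOMMU page.  If an IOMMU denies the access,
 * the unassigned region is returned instead.
 */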
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

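/* Allocate the lowest unused cpu_index from cpu_index_map; reports an
 * error through @errp once MAX_CPUMASK_BITS indices are in use.
 */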
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

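/* Common CPU initialization: allocate a cpu_index for @cpu, link it
 * into the global CPU list and register its state for migration.
 */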
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

783/* Add a breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200784int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +0000785 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000786{
aliguoric0ce9982008-11-25 22:13:57 +0000787 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +0000788
Anthony Liguori7267c092011-08-20 22:09:37 -0500789 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +0000790
791 bp->pc = pc;
792 bp->flags = flags;
793
aliguori2dc9f412008-11-18 20:56:59 +0000794 /* keep all GDB-injected breakpoints in front */
Andreas Färber00b941e2013-06-29 18:55:54 +0200795 if (flags & BP_GDB) {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200796 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200797 } else {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200798 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200799 }
aliguoria1d1bb32008-11-18 20:07:32 +0000800
Andreas Färberf0c3c502013-08-26 21:22:53 +0200801 breakpoint_invalidate(cpu, pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000802
Andreas Färber00b941e2013-06-29 18:55:54 +0200803 if (breakpoint) {
aliguoria1d1bb32008-11-18 20:07:32 +0000804 *breakpoint = bp;
Andreas Färber00b941e2013-06-29 18:55:54 +0200805 }
aliguoria1d1bb32008-11-18 20:07:32 +0000806 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000807}
808
809/* Remove a specific breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200810int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +0000811{
aliguoria1d1bb32008-11-18 20:07:32 +0000812 CPUBreakpoint *bp;
813
Andreas Färberf0c3c502013-08-26 21:22:53 +0200814 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +0000815 if (bp->pc == pc && bp->flags == flags) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200816 cpu_breakpoint_remove_by_ref(cpu, bp);
bellard4c3a88a2003-07-26 12:06:08 +0000817 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000818 }
bellard4c3a88a2003-07-26 12:06:08 +0000819 }
aliguoria1d1bb32008-11-18 20:07:32 +0000820 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +0000821}
822
aliguoria1d1bb32008-11-18 20:07:32 +0000823/* Remove a specific breakpoint by reference. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200824void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000825{
Andreas Färberf0c3c502013-08-26 21:22:53 +0200826 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
827
828 breakpoint_invalidate(cpu, breakpoint->pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000829
Anthony Liguori7267c092011-08-20 22:09:37 -0500830 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +0000831}
832
833/* Remove all matching breakpoints. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200834void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000835{
aliguoric0ce9982008-11-25 22:13:57 +0000836 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000837
Andreas Färberf0c3c502013-08-26 21:22:53 +0200838 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200839 if (bp->flags & mask) {
840 cpu_breakpoint_remove_by_ref(cpu, bp);
841 }
aliguoric0ce9982008-11-25 22:13:57 +0000842 }
bellard4c3a88a2003-07-26 12:06:08 +0000843}
844
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
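/* Find the RAMBlock that contains @addr, consulting the MRU cache
 * first and refreshing it on a miss.  Aborts if no block matches.
 */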
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

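/* Throw away the dirty-tracking TLB entries of all CPUs for the given
 * RAM range, so that the next write to those pages re-marks them dirty.
 */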
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

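/* Compute the iotlb value for a TLB entry mapping @vaddr: for RAM this
 * is a ram_addr tagged as NOTDIRTY or ROM, for MMIO it is the section
 * number within the dispatch map; pages covered by a watchpoint are
 * redirected to the watchpoint section instead.
 */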
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

1032/*
1033 * Set a custom physical guest memory alloator.
1034 * Accelerators with unusual needs may need this. Hopefully, we can
1035 * get rid of it eventually.
1036 */
Igor Mammedova2b257d2014-10-31 16:38:37 +00001037void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +02001038{
1039 phys_mem_alloc = alloc;
1040}
1041
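/* Append a copy of @section to @map's section table, taking a
 * reference on its memory region, and return the new section number.
 */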
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

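/* Release the reference that a section held on its memory region, and
 * free the containing subpage_t if the region was a subpage.
 */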
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

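/* Register a section that is smaller than a target page, creating the
 * subpage structure for its page on first use and reusing it after.
 */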
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

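/* Add @section to the dispatch map under construction: an unaligned
 * head, an unaligned tail and any piece smaller than a page go in as
 * subpages, the page-aligned middle as a multipage range.
 */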
Avi Kivityac1970f2012-10-03 16:22:53 +02001127static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001128{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001129 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001130 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001131 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001132 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001133
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001134 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1135 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1136 - now.offset_within_address_space;
1137
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001138 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001139 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001140 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001141 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001142 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 while (int128_ne(remain.size, now.size)) {
1144 remain.size = int128_sub(remain.size, now.size);
1145 remain.offset_within_address_space += int128_get64(now.size);
1146 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001147 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001148 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001149 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001150 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001151 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001152 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001153 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001154 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001155 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001156 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001157 }
1158}
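
/*
 * Worked example (illustrative, assuming 4 KiB target pages): mem_add()
 * splits a section covering [0x1800, 0x4800) into three registrations:
 *
 *   head  [0x1800, 0x2000)  register_subpage()   - unaligned start, 0x800 bytes
 *   body  [0x2000, 0x4000)  register_multipage() - two whole pages
 *   tail  [0x4000, 0x4800)  register_subpage()   - remainder below a page
 *
 * The loop exits once remain.size == now.size, i.e. right after the tail
 * has been handed to register_subpage().
 */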
1159
Sheng Yang62a27442010-01-26 19:21:16 +08001160void qemu_flush_coalesced_mmio_buffer(void)
1161{
1162 if (kvm_enabled())
1163 kvm_flush_coalesced_mmio_buffer();
1164}
1165
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001166void qemu_mutex_lock_ramlist(void)
1167{
1168 qemu_mutex_lock(&ram_list.mutex);
1169}
1170
1171void qemu_mutex_unlock_ramlist(void)
1172{
1173 qemu_mutex_unlock(&ram_list.mutex);
1174}
1175
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001176#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001177
1178#include <sys/vfs.h>
1179
1180#define HUGETLBFS_MAGIC 0x958458f6
1181
Hu Taofc7a5802014-09-09 13:28:01 +08001182static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001183{
1184 struct statfs fs;
1185 int ret;
1186
1187 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001188 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001189 } while (ret != 0 && errno == EINTR);
1190
1191 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001192 error_setg_errno(errp, errno, "failed to get page size of file %s",
1193 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001194 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001195 }
1196
Marcelo Tosattic9027602010-03-01 20:25:08 -03001197 return fs.f_bsize;
1198}
1199
Alex Williamson04b16652010-07-02 11:13:17 -06001200static void *file_ram_alloc(RAMBlock *block,
1201 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001202 const char *path,
1203 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001204{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001205 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001206 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001207 char *sanitized_name;
1208 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001209 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001210 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001211 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001212 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001213
Hu Taofc7a5802014-09-09 13:28:01 +08001214 hpagesize = gethugepagesize(path, &local_err);
1215 if (local_err) {
1216 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001217 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001218 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001219 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001220
1221 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001222 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1223 "or larger than huge page size 0x%" PRIx64,
1224 memory, hpagesize);
1225 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001226 }
1227
1228 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001229 error_setg(errp,
1230 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001231 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001232 }
1233
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001234 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1235 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1236 sanitized_name = g_strdup(memory_region_name(block->mr));
1237 for (c = sanitized_name; *c != '\0'; c++) {
1238 if (*c == '/') {
1239 *c = '_';
1240 }
1241 }
1242
1243 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1244 sanitized_name);
1245 g_free(sanitized_name);
1246
1247 fd = mkstemp(filename);
1248 if (fd >= 0) {
1249 unlink(filename);
1250 }
1251 g_free(filename);
1252 } else {
1253 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001254 }
1255
Marcelo Tosattic9027602010-03-01 20:25:08 -03001256 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001257 error_setg_errno(errp, errno,
1258 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001259 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001260 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001261
Chen Hanxiao9284f312015-07-24 11:12:03 +08001262 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001263
1264 /*
1265     * ftruncate is not supported by hugetlbfs on older
1266     * hosts, so don't bother bailing out on errors.
1267 * If anything goes wrong with it under other filesystems,
1268 * mmap will fail.
1269 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001270 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001271 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001272 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001273
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001274 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001275 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001276 error_setg_errno(errp, errno,
1277 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001278 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001279 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001280 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001281
1282 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001283 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001284 }
1285
Alex Williamson04b16652010-07-02 11:13:17 -06001286 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001287 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001288
1289error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001290 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001291}
1292#endif
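
/*
 * Illustrative usage (paths and sizes are examples only): file_ram_alloc()
 * above backs guest RAM when QEMU is started against a hugetlbfs mount:
 *
 *   qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages -mem-prealloc ...
 *
 * -mem-path selects the backing directory; -mem-prealloc sets mem_prealloc,
 * which makes os_mem_prealloc() touch every page up front instead of
 * faulting them in lazily.
 */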
1293
Mike Day0dc3f442013-09-05 14:41:35 -04001294/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001295static ram_addr_t find_ram_offset(ram_addr_t size)
1296{
Alex Williamson04b16652010-07-02 11:13:17 -06001297 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001298 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001299
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001300    assert(size != 0); /* it would hand out the same offset multiple times */
1301
Mike Day0dc3f442013-09-05 14:41:35 -04001302 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001303 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001304 }
Alex Williamson04b16652010-07-02 11:13:17 -06001305
Mike Day0dc3f442013-09-05 14:41:35 -04001306 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001307 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001308
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001309 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001310
Mike Day0dc3f442013-09-05 14:41:35 -04001311 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001312 if (next_block->offset >= end) {
1313 next = MIN(next, next_block->offset);
1314 }
1315 }
1316 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001317 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001318 mingap = next - end;
1319 }
1320 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001321
1322 if (offset == RAM_ADDR_MAX) {
1323 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1324 (uint64_t)size);
1325 abort();
1326 }
1327
Alex Williamson04b16652010-07-02 11:13:17 -06001328 return offset;
1329}
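
/*
 * Worked example (illustrative): find_ram_offset() is a best-fit search.
 * With blocks occupying [0, 1 GiB) and [2 GiB, 3 GiB), a request for
 * 512 MiB returns 1 GiB: the 1 GiB hole between the blocks fits and is
 * smaller than the unbounded gap after 3 GiB, so mingap prefers it.
 */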
1330
Juan Quintela652d7ec2012-07-20 10:37:54 +02001331ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001332{
Alex Williamsond17b5282010-06-25 11:08:38 -06001333 RAMBlock *block;
1334 ram_addr_t last = 0;
1335
Mike Day0dc3f442013-09-05 14:41:35 -04001336 rcu_read_lock();
1337 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001338 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001339 }
Mike Day0dc3f442013-09-05 14:41:35 -04001340 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001341 return last;
1342}
1343
Jason Baronddb97f12012-08-02 15:44:16 -04001344static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1345{
1346 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001347
1348    /* Use MADV_DONTDUMP if the user doesn't want guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001349 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001350 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1351 if (ret) {
1352 perror("qemu_madvise");
1353 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1354 "but dump_guest_core=off specified\n");
1355 }
1356 }
1357}
1358
Mike Day0dc3f442013-09-05 14:41:35 -04001359/* Called within an RCU critical section, or while the ramlist lock
1360 * is held.
1361 */
Hu Tao20cfe882014-04-02 15:13:26 +08001362static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001363{
Hu Tao20cfe882014-04-02 15:13:26 +08001364 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001365
Mike Day0dc3f442013-09-05 14:41:35 -04001366 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001367 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001368 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001369 }
1370 }
Hu Tao20cfe882014-04-02 15:13:26 +08001371
1372 return NULL;
1373}
1374
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001375const char *qemu_ram_get_idstr(RAMBlock *rb)
1376{
1377 return rb->idstr;
1378}
1379
Mike Dayae3a7042013-09-05 14:41:35 -04001380/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001381void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1382{
Mike Dayae3a7042013-09-05 14:41:35 -04001383 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001384
Mike Day0dc3f442013-09-05 14:41:35 -04001385 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001386 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001387 assert(new_block);
1388 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001389
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001390 if (dev) {
1391 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001392 if (id) {
1393 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001394 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001395 }
1396 }
1397 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1398
Mike Day0dc3f442013-09-05 14:41:35 -04001399 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001400 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001401 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1402 new_block->idstr);
1403 abort();
1404 }
1405 }
Mike Day0dc3f442013-09-05 14:41:35 -04001406 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001407}
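
/*
 * Illustrative result (hypothetical values): for a device whose
 * qdev_get_dev_path() yields "0000:00:02.0" and a region named
 * "e1000.rom", the code above produces the idstr "0000:00:02.0/e1000.rom",
 * which migration uses to match RAM blocks between source and destination.
 */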
1408
Mike Dayae3a7042013-09-05 14:41:35 -04001409/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001410void qemu_ram_unset_idstr(ram_addr_t addr)
1411{
Mike Dayae3a7042013-09-05 14:41:35 -04001412 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001413
Mike Dayae3a7042013-09-05 14:41:35 -04001414    /* FIXME: arch_init.c assumes that this is never called while a
1415     * migration is in progress. Ignore the problem since hot-unplug
1416     * during migration does not work anyway.
1417 */
1418
Mike Day0dc3f442013-09-05 14:41:35 -04001419 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001420 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001421 if (block) {
1422 memset(block->idstr, 0, sizeof(block->idstr));
1423 }
Mike Day0dc3f442013-09-05 14:41:35 -04001424 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001425}
1426
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001427static int memory_try_enable_merging(void *addr, size_t len)
1428{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001429 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001430 /* disabled by the user */
1431 return 0;
1432 }
1433
1434 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1435}
1436
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001437/* Only legal before the guest may have detected the memory size: e.g. on
1438 * incoming migration, or right after reset.
1439 *
1440 * As the memory core doesn't know how memory is accessed, it is up to the
1441 * resize callback to update device state and/or add assertions to detect
1442 * misuse, if necessary.
1443 */
1444int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1445{
1446 RAMBlock *block = find_ram_block(base);
1447
1448 assert(block);
1449
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001450 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001451
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001452 if (block->used_length == newsize) {
1453 return 0;
1454 }
1455
1456 if (!(block->flags & RAM_RESIZEABLE)) {
1457 error_setg_errno(errp, EINVAL,
1458 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1459 " in != 0x" RAM_ADDR_FMT, block->idstr,
1460 newsize, block->used_length);
1461 return -EINVAL;
1462 }
1463
1464 if (block->max_length < newsize) {
1465 error_setg_errno(errp, EINVAL,
1466 "Length too large: %s: 0x" RAM_ADDR_FMT
1467 " > 0x" RAM_ADDR_FMT, block->idstr,
1468 newsize, block->max_length);
1469 return -EINVAL;
1470 }
1471
1472 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1473 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001474 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1475 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001476 memory_region_set_size(block->mr, newsize);
1477 if (block->resized) {
1478 block->resized(block->idstr, newsize, block->host);
1479 }
1480 return 0;
1481}
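
/*
 * Minimal caller sketch (hypothetical, not from this file): growing a
 * resizeable block, e.g. on incoming migration. The helper name and the
 * parameters are invented; errors follow the errp convention used above.
 */
#if 0
static void example_grow_ram(ram_addr_t base, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(base, newsize, &err) < 0) {
        /* refused: block not RAM_RESIZEABLE, or newsize > max_length */
        error_report_err(err);
    }
}
#endif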
1482
Hu Taoef701d72014-09-09 13:27:54 +08001483static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001484{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001485 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001486 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001487 ram_addr_t old_ram_size, new_ram_size;
1488
1489 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001490
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001491 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001492 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001493
1494 if (!new_block->host) {
1495 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001496 xen_ram_alloc(new_block->offset, new_block->max_length,
1497 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001498 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001499 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001500 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001501 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001502 error_setg_errno(errp, errno,
1503 "cannot set up guest memory '%s'",
1504 memory_region_name(new_block->mr));
1505 qemu_mutex_unlock_ramlist();
1506 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001507 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001508 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001509 }
1510 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001511
Li Zhijiandd631692015-07-02 20:18:06 +08001512 new_ram_size = MAX(old_ram_size,
1513 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1514 if (new_ram_size > old_ram_size) {
1515 migration_bitmap_extend(old_ram_size, new_ram_size);
1516 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001517 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1518 * QLIST (which has an RCU-friendly variant) does not have insertion at
1519 * tail, so save the last element in last_block.
1520 */
Mike Day0dc3f442013-09-05 14:41:35 -04001521 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001522 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001523 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001524 break;
1525 }
1526 }
1527 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001528 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001529 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001530 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001531 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001532 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001533 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001534 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001535
Mike Day0dc3f442013-09-05 14:41:35 -04001536 /* Write list before version */
1537 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001538 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001539 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001540
Juan Quintela2152f5c2013-10-08 13:52:02 +02001541 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1542
1543 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001544 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001545
1546 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001547 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1548 ram_list.dirty_memory[i] =
1549 bitmap_zero_extend(ram_list.dirty_memory[i],
1550 old_ram_size, new_ram_size);
1551 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001552 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001553 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001554 new_block->used_length,
1555 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001556
Paolo Bonzinia904c912015-01-21 16:18:35 +01001557 if (new_block->host) {
1558 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1559 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1560 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1561 if (kvm_enabled()) {
1562 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1563 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001564 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001565
1566 return new_block->offset;
1567}
1568
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001569#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001570ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001571 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001572 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001573{
1574 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001575 ram_addr_t addr;
1576 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577
1578 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001579 error_setg(errp, "-mem-path not supported with Xen");
1580 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581 }
1582
1583 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1584 /*
1585 * file_ram_alloc() needs to allocate just like
1586 * phys_mem_alloc, but we haven't bothered to provide
1587 * a hook there.
1588 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001589 error_setg(errp,
1590 "-mem-path not supported with this accelerator");
1591 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001592 }
1593
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001594 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001595 new_block = g_malloc0(sizeof(*new_block));
1596 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001597 new_block->used_length = size;
1598 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001599 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001600 new_block->host = file_ram_alloc(new_block, size,
1601 mem_path, errp);
1602 if (!new_block->host) {
1603 g_free(new_block);
1604 return -1;
1605 }
1606
Hu Taoef701d72014-09-09 13:27:54 +08001607 addr = ram_block_add(new_block, &local_err);
1608 if (local_err) {
1609 g_free(new_block);
1610 error_propagate(errp, local_err);
1611 return -1;
1612 }
1613 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001614}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001615#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001616
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001617static
1618ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1619 void (*resized)(const char*,
1620 uint64_t length,
1621 void *host),
1622 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001623 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001624{
1625 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001626 ram_addr_t addr;
1627 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001628
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001629 size = HOST_PAGE_ALIGN(size);
1630 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001631 new_block = g_malloc0(sizeof(*new_block));
1632 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001633 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001634 new_block->used_length = size;
1635 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001636 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001637 new_block->fd = -1;
1638 new_block->host = host;
1639 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001640 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001641 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001642 if (resizeable) {
1643 new_block->flags |= RAM_RESIZEABLE;
1644 }
Hu Taoef701d72014-09-09 13:27:54 +08001645 addr = ram_block_add(new_block, &local_err);
1646 if (local_err) {
1647 g_free(new_block);
1648 error_propagate(errp, local_err);
1649 return -1;
1650 }
1651 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001652}
1653
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001654ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1655 MemoryRegion *mr, Error **errp)
1656{
1657 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1658}
1659
Hu Taoef701d72014-09-09 13:27:54 +08001660ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001661{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001662 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1663}
1664
1665ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1666 void (*resized)(const char*,
1667 uint64_t length,
1668 void *host),
1669 MemoryRegion *mr, Error **errp)
1670{
1671 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001672}
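
/*
 * Minimal caller sketch (hypothetical, all names invented): allocating a
 * resizeable block. max_size is reserved up front; used_length grows later
 * via qemu_ram_resize(), and the resized callback lets the owner react.
 */
#if 0
static void example_resized(const char *id, uint64_t length, void *host)
{
    /* the owning device updates its own view of the block size here */
}

static void example_alloc(MemoryRegion *mr)
{
    Error *err = NULL;
    ram_addr_t offset = qemu_ram_alloc_resizeable(16 * 1024 * 1024, /* initial */
                                                  64 * 1024 * 1024, /* maximum */
                                                  example_resized, mr, &err);
    if (err) {
        error_report_err(err);
        return;
    }
    (void)offset;
}
#endif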
bellarde9a1ab12007-02-08 23:08:38 +00001673
Paolo Bonzini43771532013-09-09 17:58:40 +02001674static void reclaim_ramblock(RAMBlock *block)
1675{
1676 if (block->flags & RAM_PREALLOC) {
1677 ;
1678 } else if (xen_enabled()) {
1679 xen_invalidate_map_cache_entry(block->host);
1680#ifndef _WIN32
1681 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001682 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001683 close(block->fd);
1684#endif
1685 } else {
1686 qemu_anon_ram_free(block->host, block->max_length);
1687 }
1688 g_free(block);
1689}
1690
Anthony Liguoric227f092009-10-01 16:12:16 -05001691void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001692{
Alex Williamson04b16652010-07-02 11:13:17 -06001693 RAMBlock *block;
1694
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001695 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001696 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001697 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001698 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001699 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001700 /* Write list before version */
1701 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001702 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001703 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001704 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001705 }
1706 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001707 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001708}
1709
Huang Yingcd19cfa2011-03-02 08:56:19 +01001710#ifndef _WIN32
1711void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1712{
1713 RAMBlock *block;
1714 ram_addr_t offset;
1715 int flags;
1716 void *area, *vaddr;
1717
Mike Day0dc3f442013-09-05 14:41:35 -04001718 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001719 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001720 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001721 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001722 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001723 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001724 } else if (xen_enabled()) {
1725 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001726 } else {
1727 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001728 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001729 flags |= (block->flags & RAM_SHARED ?
1730 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001731 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1732 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001733 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001734 /*
1735 * Remap needs to match alloc. Accelerators that
1736 * set phys_mem_alloc never remap. If they did,
1737 * we'd need a remap hook here.
1738 */
1739 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1740
Huang Yingcd19cfa2011-03-02 08:56:19 +01001741 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1742 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1743 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001744 }
1745 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001746 fprintf(stderr, "Could not remap addr: "
1747 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001748 length, addr);
1749 exit(1);
1750 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001751 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001752 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001753 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001754 }
1755 }
1756}
1757#endif /* !_WIN32 */
1758
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001759int qemu_get_ram_fd(ram_addr_t addr)
1760{
Mike Dayae3a7042013-09-05 14:41:35 -04001761 RAMBlock *block;
1762 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001763
Mike Day0dc3f442013-09-05 14:41:35 -04001764 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001765 block = qemu_get_ram_block(addr);
1766 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001767 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001768 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001769}
1770
Damjan Marion3fd74b82014-06-26 23:01:32 +02001771void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1772{
Mike Dayae3a7042013-09-05 14:41:35 -04001773 RAMBlock *block;
1774 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001775
Mike Day0dc3f442013-09-05 14:41:35 -04001776 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001777 block = qemu_get_ram_block(addr);
1778 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001779 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001780 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001781}
1782
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001783/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001784 * This should not be used for general purpose DMA. Use address_space_map
1785 * or address_space_rw instead. For local memory (e.g. video ram) that the
1786 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001787 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001788 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001789 */
1790void *qemu_get_ram_ptr(ram_addr_t addr)
1791{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001792 RAMBlock *block = qemu_get_ram_block(addr);
Mike Dayae3a7042013-09-05 14:41:35 -04001793
1794 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001795 /* We need to check if the requested address is in the RAM
1796 * because we don't want to map the entire memory in QEMU.
1797 * In that case just map until the end of the page.
1798 */
1799 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001800 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001801 }
Mike Dayae3a7042013-09-05 14:41:35 -04001802
1803 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001804 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001805 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001806}
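
/*
 * Sketch of the alternative named in the comment above (hypothetical
 * caller; gpa and the buffer are invented): general purpose DMA should go
 * through the address_space_* API rather than qemu_get_ram_ptr().
 */
#if 0
static void example_dma_read(hwaddr gpa)
{
    uint8_t buf[64];

    /* handles RAM, MMIO and bounce buffering uniformly */
    address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
                     buf, sizeof(buf), false /* is_write */);
}
#endif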
1807
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001808/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001809 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001810 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001811 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001812 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001813static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001814{
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001815 RAMBlock *block;
1816 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001817 if (*size == 0) {
1818 return NULL;
1819 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001820
1821 block = qemu_get_ram_block(addr);
1822 offset_inside_block = addr - block->offset;
1823 *size = MIN(*size, block->max_length - offset_inside_block);
1824
1825 if (xen_enabled() && block->host == NULL) {
1826 /* We need to check if the requested address is in the RAM
1827 * because we don't want to map the entire memory in QEMU.
1828 * In that case just map the requested area.
1829 */
1830 if (block->offset == 0) {
1831 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001832 }
1833
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001834 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001835 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001836
1837 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001838}
1839
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001840/*
1841 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1842 * in that RAMBlock.
1843 *
1844 * ptr: Host pointer to look up
1845 * round_offset: If true round the result offset down to a page boundary
1846 * *ram_addr: set to result ram_addr
1847 * *offset: set to result offset within the RAMBlock
1848 *
1849 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001850 *
1851 * By the time this function returns, the returned pointer is not protected
1852 * by RCU anymore. If the caller is not within an RCU critical section and
1853 * does not hold the iothread lock, it must have other means of protecting the
1854 * pointer, such as a reference to the region that includes the incoming
1855 * ram_addr_t.
1856 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001857RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1858 ram_addr_t *ram_addr,
1859 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001860{
pbrook94a6b542009-04-11 17:15:54 +00001861 RAMBlock *block;
1862 uint8_t *host = ptr;
1863
Jan Kiszka868bb332011-06-21 22:59:09 +02001864 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001865 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001866 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001867 block = qemu_get_ram_block(*ram_addr);
1868 if (block) {
1869 *offset = (host - block->host);
1870 }
Mike Day0dc3f442013-09-05 14:41:35 -04001871 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001872 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001873 }
1874
Mike Day0dc3f442013-09-05 14:41:35 -04001875 rcu_read_lock();
1876 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001877 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001878 goto found;
1879 }
1880
Mike Day0dc3f442013-09-05 14:41:35 -04001881 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001882        /* This case appears when the block is not mapped. */
1883 if (block->host == NULL) {
1884 continue;
1885 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001886 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001887 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001888 }
pbrook94a6b542009-04-11 17:15:54 +00001889 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001890
Mike Day0dc3f442013-09-05 14:41:35 -04001891 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001892 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001893
1894found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001895 *offset = (host - block->host);
1896 if (round_offset) {
1897 *offset &= TARGET_PAGE_MASK;
1898 }
1899 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001900 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001901 return block;
1902}
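
/*
 * Minimal caller sketch (hypothetical): turning a host pointer back into a
 * named block plus offset, as postcopy-style code might do with a faulting
 * address.
 */
#if 0
static void example_lookup(void *host_ptr)
{
    RAMBlock *rb;
    ram_addr_t ram_addr, offset;

    rb = qemu_ram_block_from_host(host_ptr, true, &ram_addr, &offset);
    if (rb) {
        /* offset is page aligned here because round_offset was true */
        printf("%s+0x" RAM_ADDR_FMT "\n", qemu_ram_get_idstr(rb), offset);
    }
}
#endif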
1903
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001904/*
1905 * Finds the named RAMBlock
1906 *
1907 * name: The name of RAMBlock to find
1908 *
1909 * Returns: RAMBlock (or NULL if not found)
1910 */
1911RAMBlock *qemu_ram_block_by_name(const char *name)
1912{
1913 RAMBlock *block;
1914
1915 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1916 if (!strcmp(name, block->idstr)) {
1917 return block;
1918 }
1919 }
1920
1921 return NULL;
1922}
1923
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001924/* Some of the softmmu routines need to translate from a host pointer
1925 (typically a TLB entry) back to a ram offset. */
1926MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1927{
1928 RAMBlock *block;
1929 ram_addr_t offset; /* Not used */
1930
1931 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1932
1933 if (!block) {
1934 return NULL;
1935 }
1936
1937 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001938}
Alex Williamsonf471a172010-06-11 11:11:42 -06001939
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001940/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001941static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001942 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001943{
Juan Quintela52159192013-10-08 12:44:04 +02001944 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001945 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001946 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001947 switch (size) {
1948 case 1:
1949 stb_p(qemu_get_ram_ptr(ram_addr), val);
1950 break;
1951 case 2:
1952 stw_p(qemu_get_ram_ptr(ram_addr), val);
1953 break;
1954 case 4:
1955 stl_p(qemu_get_ram_ptr(ram_addr), val);
1956 break;
1957 default:
1958 abort();
1959 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001960 /* Set both VGA and migration bits for simplicity and to remove
1961 * the notdirty callback faster.
1962 */
1963 cpu_physical_memory_set_dirty_range(ram_addr, size,
1964 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001965 /* we remove the notdirty callback only if the code has been
1966 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001967 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001968 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001969 }
bellard1ccde1c2004-02-06 19:46:14 +00001970}
1971
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001972static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1973 unsigned size, bool is_write)
1974{
1975 return is_write;
1976}
1977
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001978static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001979 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001980 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001981 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001982};
1983
pbrook0f459d12008-06-09 00:20:13 +00001984/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001985static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001986{
Andreas Färber93afead2013-08-26 03:41:01 +02001987 CPUState *cpu = current_cpu;
1988 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001989 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001990 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001991 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001992 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001993
Andreas Färberff4700b2013-08-26 18:23:18 +02001994 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001995 /* We re-entered the check after replacing the TB. Now raise
1996         * the debug interrupt so that it will trigger after the
1997 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001998 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001999 return;
2000 }
Andreas Färber93afead2013-08-26 03:41:01 +02002001 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002002 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002003 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2004 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002005 if (flags == BP_MEM_READ) {
2006 wp->flags |= BP_WATCHPOINT_HIT_READ;
2007 } else {
2008 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2009 }
2010 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002011 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002012 if (!cpu->watchpoint_hit) {
2013 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002014 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002015 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002016 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002017 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002018 } else {
2019 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002020 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002021 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002022 }
aliguori06d55cc2008-11-18 20:24:06 +00002023 }
aliguori6e140f22008-11-18 20:37:55 +00002024 } else {
2025 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002026 }
2027 }
2028}
2029
pbrook6658ffb2007-03-16 23:58:11 +00002030/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2031 so these check for a hit then pass through to the normal out-of-line
2032 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002033static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2034 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002035{
Peter Maydell66b9b432015-04-26 16:49:24 +01002036 MemTxResult res;
2037 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002038
Peter Maydell66b9b432015-04-26 16:49:24 +01002039 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002040 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002041 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002042 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002043 break;
2044 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002045 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002046 break;
2047 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002048 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002049 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002050 default: abort();
2051 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002052 *pdata = data;
2053 return res;
2054}
2055
2056static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2057 uint64_t val, unsigned size,
2058 MemTxAttrs attrs)
2059{
2060 MemTxResult res;
2061
2062 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2063 switch (size) {
2064 case 1:
2065 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2066 break;
2067 case 2:
2068 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2069 break;
2070 case 4:
2071 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2072 break;
2073 default: abort();
2074 }
2075 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002076}
2077
Avi Kivity1ec9b902012-01-02 12:47:48 +02002078static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002079 .read_with_attrs = watch_mem_read,
2080 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002081 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002082};
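
/*
 * Minimal sketch (hypothetical): accesses reach watch_mem_read/write only
 * after a watchpoint is armed; a debugger stub would do so roughly like
 * this (the address and length are invented).
 */
#if 0
static void example_arm_watchpoint(CPUState *cpu, vaddr addr)
{
    /* trap 4-byte writes; check_watchpoint() then sets BP_WATCHPOINT_HIT_WRITE */
    cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
}
#endif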
pbrook6658ffb2007-03-16 23:58:11 +00002083
Peter Maydellf25a49e2015-04-26 16:49:24 +01002084static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2085 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002086{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002087 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002088 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002089 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002090
blueswir1db7b5422007-05-26 17:36:03 +00002091#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002092 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002093 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002094#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002095 res = address_space_read(subpage->as, addr + subpage->base,
2096 attrs, buf, len);
2097 if (res) {
2098 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002099 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002100 switch (len) {
2101 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002102 *data = ldub_p(buf);
2103 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002104 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002105 *data = lduw_p(buf);
2106 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002107 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002108 *data = ldl_p(buf);
2109 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002110 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002111 *data = ldq_p(buf);
2112 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002113 default:
2114 abort();
2115 }
blueswir1db7b5422007-05-26 17:36:03 +00002116}
2117
Peter Maydellf25a49e2015-04-26 16:49:24 +01002118static MemTxResult subpage_write(void *opaque, hwaddr addr,
2119 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002120{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002121 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002122 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002123
blueswir1db7b5422007-05-26 17:36:03 +00002124#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002125 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002126 " value %"PRIx64"\n",
2127 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002128#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002129 switch (len) {
2130 case 1:
2131 stb_p(buf, value);
2132 break;
2133 case 2:
2134 stw_p(buf, value);
2135 break;
2136 case 4:
2137 stl_p(buf, value);
2138 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002139 case 8:
2140 stq_p(buf, value);
2141 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002142 default:
2143 abort();
2144 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002145 return address_space_write(subpage->as, addr + subpage->base,
2146 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002147}
2148
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002149static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002150 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002151{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002152 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002153#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002154 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002155 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002156#endif
2157
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002158 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002159 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002160}
2161
Avi Kivity70c68e42012-01-02 12:32:48 +02002162static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002163 .read_with_attrs = subpage_read,
2164 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002165 .impl.min_access_size = 1,
2166 .impl.max_access_size = 8,
2167 .valid.min_access_size = 1,
2168 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002169 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002170 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002171};
2172
Anthony Liguoric227f092009-10-01 16:12:16 -05002173static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002174 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002175{
2176 int idx, eidx;
2177
2178 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2179 return -1;
2180 idx = SUBPAGE_IDX(start);
2181 eidx = SUBPAGE_IDX(end);
2182#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002183 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2184 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002185#endif
blueswir1db7b5422007-05-26 17:36:03 +00002186 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002187 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002188 }
2189
2190 return 0;
2191}
2192
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002193static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002194{
Anthony Liguoric227f092009-10-01 16:12:16 -05002195 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002196
Anthony Liguori7267c092011-08-20 22:09:37 -05002197 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002198
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002199 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002200 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002201 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002202 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002203 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002204#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002205 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2206 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002207#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002208 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002209
2210 return mmio;
2211}
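
/*
 * Worked example (illustrative, assuming 4 KiB target pages): subpages
 * serve sections smaller than a page. If MMIO covers [0x0, 0x800) and RAM
 * covers [0x800, 0x1000) of one page, register_subpage() creates a single
 * subpage_t for that page; subpage_register() fills sub_section[] so that
 * offsets 0x000-0x7ff resolve to the MMIO section and 0x800-0xfff to the
 * RAM section, and subpage_read()/subpage_write() re-dispatch each access
 * through subpage->as at base + offset.
 */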
2212
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002213static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2214 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002215{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002216 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002217 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002218 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002219 .mr = mr,
2220 .offset_within_address_space = 0,
2221 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002222 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002223 };
2224
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002225 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002226}
2227
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002228MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002229{
Peter Maydell32857f42015-10-01 15:29:50 +01002230 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2231 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002232 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002233
2234 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002235}
2236
Avi Kivitye9179ce2009-06-14 11:38:52 +03002237static void io_mem_init(void)
2238{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002239 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002240 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002241 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002242 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002243 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002244 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002245 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002246}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
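
/* Sketch of the two-phase update that mem_begin()/mem_commit() implement:
 *
 *     mem_begin()   builds as->next_dispatch while readers keep using
 *                   as->dispatch under rcu_read_lock();
 *     mem_commit()  publishes it with atomic_rcu_set() and frees the old
 *                   table only after a grace period, via call_rcu().
 *
 * Readers therefore always see either the complete old or the complete
 * new dispatch table, never a half-built one.
 */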

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
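
/* The DIRTY_MEMORY_CODE bit is handled synchronously above: a write into a
 * page that holds translated code must invalidate the overlapping TBs right
 * away, so that bit is consumed here and only the remaining bits (VGA,
 * migration) are recorded in the dirty bitmap.
 */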

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified. */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address. */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum. */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
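
/* Worked example, assuming a region whose ops leave valid.max_access_size
 * at 0 (i.e. the 1-4 byte default) and impl.unaligned clear: for addr
 * 0x1002 and l == 8, addr & -addr yields 2, so the access is clamped to
 * 2 bytes and the caller's loop issues a 16-bit dispatch for this step.
 */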

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
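
/* Typical caller pattern (a sketch; this is how the accessors below use it):
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ...dispatch the access to mr...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 *
 * The return value only reports whether *this* call took the iothread lock;
 * a device that does its own locking (mr->global_locking clear) is entered
 * without it.
 */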

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}
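
/* A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *     uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     MemTxResult r = address_space_write(&address_space_memory, 0x1000,
 *                                         MEMTXATTRS_UNSPECIFIED,
 *                                         data, sizeof(data));
 *     if (r != MEMTX_OK) {
 *         ...the device signalled a decode or transaction error...
 *     }
 */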

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);

        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;
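
/* There is deliberately a single global bounce buffer, so at most one
 * address_space_map() of an MMIO region can be outstanding at any time.
 * A second mapper sees bounce.in_use set (the atomic_xchg in
 * address_space_map() below) and must retry later, typically via
 * cpu_register_map_client().
 */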

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
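
/* Registration sketch (hypothetical device code, not part of this file):
 * a device whose address_space_map() attempt just failed queues a bottom
 * half and registers it; the BH fires once the bounce buffer is free and
 * the device retries the mapping from there.
 *
 *     s->retry_bh = qemu_bh_new(retry_dma_cb, s);
 *     cpu_register_map_client(s->retry_bh);
 */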

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
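
/* DMA-style usage sketch (hypothetical caller, not part of this file):
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, addr, &plen, true);
 *     if (p) {
 *         ...fill up to plen bytes at p...
 *         address_space_unmap(as, p, plen, true, plen);
 *     } else {
 *         ...retry later, e.g. via cpu_register_map_client()...
 *     }
 *
 * Note that plen may come back smaller than requested; a real caller
 * loops until the whole transfer is done.
 */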

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
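
/* Note on the bswap pattern above and in the helpers below:
 * memory_region_dispatch_read()/write() work in the target's native
 * endianness, so a byte swap is only needed when the endianness the
 * caller asked for differs from the target's. The two mirrored #if
 * branches encode exactly that.
 */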

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
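
/* The write side above deliberately goes through
 * cpu_physical_memory_write_rom(): that is what lets a debugger patch
 * instructions (e.g. software breakpoints) even in pages the guest
 * sees as ROM.
 */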

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
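
/* Iteration sketch (hypothetical callback, not part of this file):
 *
 *     static int print_block(const char *idstr, void *host_addr,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         printf("%s: %p +" RAM_ADDR_FMT "\n", idstr, host_addr, length);
 *         return 0;    // non-zero would stop the walk
 *     }
 *
 *     qemu_ram_foreach_block(print_block, NULL);
 */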
#endif