/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

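/* For illustration, assuming a target with 4 KiB pages (TARGET_PAGE_BITS == 12):
 * P_L2_LEVELS is ((64 - 12 - 1) / 9) + 1 == 6, so a physical address is
 * resolved through up to six levels of 512-entry (P_L2_SIZE) Node tables,
 * each level consuming nine bits of the page index.
 */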
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

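/* Illustration of the effect of phys_page_compact(): when an intermediate
 * node has a single valid child, the parent entry is rewritten to point
 * past it, accumulating the child's skip count, so that a later
 * phys_page_find() can descend several levels in one step (as long as the
 * combined skip still passes the size check above).
 */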
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
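/* A minimal usage sketch for a 4-byte read at guest physical address addr,
 * assuming the caller already holds the RCU read lock as required by the
 * comment above:
 *
 *     hwaddr xlat, len = 4;
 *     MemoryRegion *mr;
 *
 *     mr = address_space_translate(&address_space_memory, addr,
 *                                  &xlat, &len, false);
 *
 * On return, mr and xlat identify the target MemoryRegion and the offset
 * within it; len is clamped for RAM regions but, as explained in
 * address_space_translate_internal(), not for MMIO regions.
 */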

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
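/* For example, a target that models separate secure and non-secure physical
 * address spaces might set cpu->num_ases to 2 and call
 * cpu_address_space_init() once for each index while the CPU is being
 * realized; this is a sketch of the intended use, and each target decides
 * how many address spaces it actually needs.
 */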

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
Paul Brookc527ee82010-03-01 03:31:14 +0000792#endif
aliguoria1d1bb32008-11-18 20:07:32 +0000793
794/* Add a breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200795int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +0000796 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000797{
aliguoric0ce9982008-11-25 22:13:57 +0000798 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +0000799
Anthony Liguori7267c092011-08-20 22:09:37 -0500800 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +0000801
802 bp->pc = pc;
803 bp->flags = flags;
804
aliguori2dc9f412008-11-18 20:56:59 +0000805 /* keep all GDB-injected breakpoints in front */
Andreas Färber00b941e2013-06-29 18:55:54 +0200806 if (flags & BP_GDB) {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200807 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200808 } else {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200809 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200810 }
aliguoria1d1bb32008-11-18 20:07:32 +0000811
Andreas Färberf0c3c502013-08-26 21:22:53 +0200812 breakpoint_invalidate(cpu, pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000813
Andreas Färber00b941e2013-06-29 18:55:54 +0200814 if (breakpoint) {
aliguoria1d1bb32008-11-18 20:07:32 +0000815 *breakpoint = bp;
Andreas Färber00b941e2013-06-29 18:55:54 +0200816 }
aliguoria1d1bb32008-11-18 20:07:32 +0000817 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000818}
819
820/* Remove a specific breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200821int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +0000822{
aliguoria1d1bb32008-11-18 20:07:32 +0000823 CPUBreakpoint *bp;
824
Andreas Färberf0c3c502013-08-26 21:22:53 +0200825 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +0000826 if (bp->pc == pc && bp->flags == flags) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200827 cpu_breakpoint_remove_by_ref(cpu, bp);
bellard4c3a88a2003-07-26 12:06:08 +0000828 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000829 }
bellard4c3a88a2003-07-26 12:06:08 +0000830 }
aliguoria1d1bb32008-11-18 20:07:32 +0000831 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +0000832}
833
aliguoria1d1bb32008-11-18 20:07:32 +0000834/* Remove a specific breakpoint by reference. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200835void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000836{
Andreas Färberf0c3c502013-08-26 21:22:53 +0200837 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
838
839 breakpoint_invalidate(cpu, breakpoint->pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000840
Anthony Liguori7267c092011-08-20 22:09:37 -0500841 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +0000842}
843
844/* Remove all matching breakpoints. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200845void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000846{
aliguoric0ce9982008-11-25 22:13:57 +0000847 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000848
Andreas Färberf0c3c502013-08-26 21:22:53 +0200849 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200850 if (bp->flags & mask) {
851 cpu_breakpoint_remove_by_ref(cpu, bp);
852 }
aliguoric0ce9982008-11-25 22:13:57 +0000853 }
bellard4c3a88a2003-07-26 12:06:08 +0000854}
855
bellardc33a3462003-07-29 20:50:33 +0000856/* enable or disable single step mode. EXCP_DEBUG is returned by the
857 CPU loop after each instruction */
Andreas Färber3825b282013-06-24 18:41:06 +0200858void cpu_single_step(CPUState *cpu, int enabled)
bellardc33a3462003-07-29 20:50:33 +0000859{
Andreas Färbered2803d2013-06-21 20:20:45 +0200860 if (cpu->singlestep_enabled != enabled) {
861 cpu->singlestep_enabled = enabled;
862 if (kvm_enabled()) {
Stefan Weil38e478e2013-07-25 20:50:21 +0200863 kvm_update_guest_debug(cpu, 0);
Andreas Färbered2803d2013-06-21 20:20:45 +0200864 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100865 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +0000866 /* XXX: only flush what is necessary */
Peter Crosthwaitebbd77c12015-06-23 19:31:15 -0700867 tb_flush(cpu);
aliguorie22a25c2009-03-12 20:12:48 +0000868 }
bellardc33a3462003-07-29 20:50:33 +0000869 }
bellardc33a3462003-07-29 20:50:33 +0000870}
871
Andreas Färbera47dddd2013-09-03 17:38:47 +0200872void cpu_abort(CPUState *cpu, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +0000873{
874 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +0000875 va_list ap2;
bellard75012672003-06-21 13:11:07 +0000876
877 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +0000878 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +0000879 fprintf(stderr, "qemu: fatal: ");
880 vfprintf(stderr, fmt, ap);
881 fprintf(stderr, "\n");
Andreas Färber878096e2013-05-27 01:33:50 +0200882 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
Paolo Bonzini013a2942015-11-13 13:16:27 +0100883 if (qemu_log_separate()) {
aliguori93fcfe32009-01-15 22:34:14 +0000884 qemu_log("qemu: fatal: ");
885 qemu_log_vprintf(fmt, ap2);
886 qemu_log("\n");
Andreas Färbera0762852013-06-16 07:28:50 +0200887 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +0000888 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +0000889 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +0000890 }
pbrook493ae1f2007-11-23 16:53:59 +0000891 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +0000892 va_end(ap);
Pavel Dovgalyuk76159362015-09-17 19:25:07 +0300893 replay_finish();
Riku Voipiofd052bf2010-01-25 14:30:49 +0200894#if defined(CONFIG_USER_ONLY)
895 {
896 struct sigaction act;
897 sigfillset(&act.sa_mask);
898 act.sa_handler = SIG_DFL;
899 sigaction(SIGABRT, &act, NULL);
900 }
901#endif
bellard75012672003-06-21 13:11:07 +0000902 abort();
903}
904
bellard01243112004-01-04 15:48:17 +0000905#if !defined(CONFIG_USER_ONLY)
Mike Day0dc3f442013-09-05 14:41:35 -0400906/* Called from RCU critical section */
Paolo Bonzini041603f2013-09-09 17:49:45 +0200907static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
908{
909 RAMBlock *block;
910
Paolo Bonzini43771532013-09-09 17:58:40 +0200911 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +0200912 if (block && addr - block->offset < block->max_length) {
Paolo Bonzini68851b92015-10-22 13:51:30 +0200913 return block;
Paolo Bonzini041603f2013-09-09 17:49:45 +0200914 }
Mike Day0dc3f442013-09-05 14:41:35 -0400915 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +0200916 if (addr - block->offset < block->max_length) {
Paolo Bonzini041603f2013-09-09 17:49:45 +0200917 goto found;
918 }
919 }
920
921 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
922 abort();
923
924found:
Paolo Bonzini43771532013-09-09 17:58:40 +0200925 /* It is safe to write mru_block outside the iothread lock. This
926 * is what happens:
927 *
928 * mru_block = xxx
929 * rcu_read_unlock()
930 * xxx removed from list
931 * rcu_read_lock()
932 * read mru_block
933 * mru_block = NULL;
934 * call_rcu(reclaim_ramblock, xxx);
935 * rcu_read_unlock()
936 *
937 * atomic_rcu_set is not needed here. The block was already published
938 * when it was placed into the list. Here we're just making an extra
939 * copy of the pointer.
940 */
Paolo Bonzini041603f2013-09-09 17:49:45 +0200941 ram_list.mru_block = block;
942 return block;
943}
944
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200945static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
bellard1ccde1c2004-02-06 19:46:14 +0000946{
Peter Crosthwaite9a135652015-09-10 22:39:41 -0700947 CPUState *cpu;
Paolo Bonzini041603f2013-09-09 17:49:45 +0200948 ram_addr_t start1;
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200949 RAMBlock *block;
950 ram_addr_t end;
951
952 end = TARGET_PAGE_ALIGN(start + length);
953 start &= TARGET_PAGE_MASK;
bellardf23db162005-08-21 19:12:28 +0000954
Mike Day0dc3f442013-09-05 14:41:35 -0400955 rcu_read_lock();
Paolo Bonzini041603f2013-09-09 17:49:45 +0200956 block = qemu_get_ram_block(start);
957 assert(block == qemu_get_ram_block(end - 1));
Michael S. Tsirkin1240be22014-11-12 11:44:41 +0200958 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
Peter Crosthwaite9a135652015-09-10 22:39:41 -0700959 CPU_FOREACH(cpu) {
960 tlb_reset_dirty(cpu, start1, length);
961 }
Mike Day0dc3f442013-09-05 14:41:35 -0400962 rcu_read_unlock();
Juan Quintelad24981d2012-05-22 00:42:40 +0200963}
964
965/* Note: start and end must be within the same ram block. */
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000966bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
967 ram_addr_t length,
968 unsigned client)
Juan Quintelad24981d2012-05-22 00:42:40 +0200969{
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000970 unsigned long end, page;
971 bool dirty;
Juan Quintelad24981d2012-05-22 00:42:40 +0200972
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000973 if (length == 0) {
974 return false;
975 }
976
977 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
978 page = start >> TARGET_PAGE_BITS;
979 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
980 page, end - page);
981
982 if (dirty && tcg_enabled()) {
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200983 tlb_reset_dirty_range_all(start, length);
Juan Quintelad24981d2012-05-22 00:42:40 +0200984 }
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000985
986 return dirty;
bellard1ccde1c2004-02-06 19:46:14 +0000987}
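/* A usage sketch, with DIRTY_MEMORY_VGA as an illustrative client and
 * fb_start/fb_len as hypothetical names for a framebuffer's ram_addr_t
 * range: a display device could redraw only when guest writes have been
 * seen since the previous call, e.g.
 *
 *     if (cpu_physical_memory_test_and_clear_dirty(fb_start, fb_len,
 *                                                  DIRTY_MEMORY_VGA)) {
 *         update_display();
 *     }
 *
 * where update_display() stands in for whatever refresh the caller needs.
 */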

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
1045 * Accelerators with unusual needs may need this. Hopefully, we can
1046 * get rid of it eventually.
1047 */
Igor Mammedova2b257d2014-10-31 16:38:37 +00001048void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +02001049{
1050 phys_mem_alloc = alloc;
1051}
1052
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001053static uint16_t phys_section_add(PhysPageMap *map,
1054 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001055{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001056 /* The physical section number is ORed with a page-aligned
1057 * pointer to produce the iotlb entries. Thus it should
1058 * never overflow into the page-aligned value.
1059 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001060 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001061
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001062 if (map->sections_nb == map->sections_nb_alloc) {
1063 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1064 map->sections = g_renew(MemoryRegionSection, map->sections,
1065 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001066 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001067 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001068 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001069 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001070}
1071
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001072static void phys_section_destroy(MemoryRegion *mr)
1073{
Don Slutz55b4e802015-11-30 17:11:04 -05001074 bool have_sub_page = mr->subpage;
1075
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001076 memory_region_unref(mr);
1077
Don Slutz55b4e802015-11-30 17:11:04 -05001078 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001079 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001080 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001081 g_free(subpage);
1082 }
1083}
1084
Paolo Bonzini60926662013-05-29 12:30:26 +02001085static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001086{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001087 while (map->sections_nb > 0) {
1088 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001089 phys_section_destroy(section->mr);
1090 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001091 g_free(map->sections);
1092 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001093}
1094
Avi Kivityac1970f2012-10-03 16:22:53 +02001095static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001096{
1097 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001098 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001099 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001100 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001101 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001102 MemoryRegionSection subsection = {
1103 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001104 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001105 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001106 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001107
Avi Kivityf3705d52012-03-08 16:16:34 +02001108 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001109
Avi Kivityf3705d52012-03-08 16:16:34 +02001110 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001111 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001112 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001113 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001114 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001115 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001116 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001117 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001118 }
1119 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001120 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001121 subpage_register(subpage, start, end,
1122 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001123}
1124
1125
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001126static void register_multipage(AddressSpaceDispatch *d,
1127 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001128{
Avi Kivitya8170e52012-10-23 12:30:10 +02001129 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001130 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001131 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1132 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001133
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001134 assert(num_pages);
1135 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001136}
1137
Avi Kivityac1970f2012-10-03 16:22:53 +02001138static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001139{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001140 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001141 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001142 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001144
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001145 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1146 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1147 - now.offset_within_address_space;
1148
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001149 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001150 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001151 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001152 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001153 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001154 while (int128_ne(remain.size, now.size)) {
1155 remain.size = int128_sub(remain.size, now.size);
1156 remain.offset_within_address_space += int128_get64(now.size);
1157 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001158 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001159 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001160 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001161 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001162 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001163 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001164 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001165 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001166 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001167 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001168 }
1169}
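/*
 * Illustrative walkthrough (editorial sketch, not part of the original file),
 * assuming TARGET_PAGE_SIZE is 0x1000: a section starting at address 0x1800
 * with size 0x3000 is registered by mem_add() in three steps:
 *
 *   1. head:  subpage   [0x1800, 0x2000)  via register_subpage()
 *   2. body:  two pages [0x2000, 0x4000)  via register_multipage()
 *   3. tail:  subpage   [0x4000, 0x4800)  via register_subpage()
 *
 * Only the unaligned head and tail go through the subpage machinery; the
 * page-aligned middle is mapped directly with phys_page_set().
 */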
1170
Sheng Yang62a27442010-01-26 19:21:16 +08001171void qemu_flush_coalesced_mmio_buffer(void)
1172{
1173 if (kvm_enabled())
1174 kvm_flush_coalesced_mmio_buffer();
1175}
1176
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001177void qemu_mutex_lock_ramlist(void)
1178{
1179 qemu_mutex_lock(&ram_list.mutex);
1180}
1181
1182void qemu_mutex_unlock_ramlist(void)
1183{
1184 qemu_mutex_unlock(&ram_list.mutex);
1185}
1186
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001187#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001188
1189#include <sys/vfs.h>
1190
1191#define HUGETLBFS_MAGIC 0x958458f6
1192
Hu Taofc7a5802014-09-09 13:28:01 +08001193static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001194{
1195 struct statfs fs;
1196 int ret;
1197
1198 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001199 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001200 } while (ret != 0 && errno == EINTR);
1201
1202 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001203 error_setg_errno(errp, errno, "failed to get page size of file %s",
1204 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001205 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001206 }
1207
Marcelo Tosattic9027602010-03-01 20:25:08 -03001208 return fs.f_bsize;
1209}
1210
Alex Williamson04b16652010-07-02 11:13:17 -06001211static void *file_ram_alloc(RAMBlock *block,
1212 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001213 const char *path,
1214 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001215{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001216 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001217 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001218 char *sanitized_name;
1219 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001220 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001221 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001222 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001223 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001224
Hu Taofc7a5802014-09-09 13:28:01 +08001225 hpagesize = gethugepagesize(path, &local_err);
1226 if (local_err) {
1227 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001228 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001229 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001230 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231
1232 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001233 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1234 "or larger than huge page size 0x%" PRIx64,
1235 memory, hpagesize);
1236 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001237 }
1238
1239 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001240 error_setg(errp,
1241 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001242 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001243 }
1244
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001245 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1246 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1247 sanitized_name = g_strdup(memory_region_name(block->mr));
1248 for (c = sanitized_name; *c != '\0'; c++) {
1249 if (*c == '/') {
1250 *c = '_';
1251 }
1252 }
1253
1254 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1255 sanitized_name);
1256 g_free(sanitized_name);
1257
1258 fd = mkstemp(filename);
1259 if (fd >= 0) {
1260 unlink(filename);
1261 }
1262 g_free(filename);
1263 } else {
1264 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001265 }
1266
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001268 error_setg_errno(errp, errno,
1269 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001270 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001271 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001272
Chen Hanxiao9284f312015-07-24 11:12:03 +08001273 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001274
1275 /*
1276 * ftruncate is not supported by hugetlbfs in older
1277 * hosts, so don't bother bailing out on errors.
1278 * If anything goes wrong with it under other filesystems,
1279 * mmap will fail.
1280 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001281 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001282 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001283 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001284
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001285 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001286 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001287 error_setg_errno(errp, errno,
1288 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001289 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001290 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001291 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001292
1293 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001294 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001295 }
1296
Alex Williamson04b16652010-07-02 11:13:17 -06001297 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001298 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001299
1300error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001301 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001302}
1303#endif
1304
Mike Day0dc3f442013-09-05 14:41:35 -04001305/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001306static ram_addr_t find_ram_offset(ram_addr_t size)
1307{
Alex Williamson04b16652010-07-02 11:13:17 -06001308 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001309 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001310
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001311 assert(size != 0); /* it would hand out same offset multiple times */
1312
Mike Day0dc3f442013-09-05 14:41:35 -04001313 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001314 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001315 }
Alex Williamson04b16652010-07-02 11:13:17 -06001316
Mike Day0dc3f442013-09-05 14:41:35 -04001317 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001318 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001319
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001320 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001321
Mike Day0dc3f442013-09-05 14:41:35 -04001322 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001323 if (next_block->offset >= end) {
1324 next = MIN(next, next_block->offset);
1325 }
1326 }
1327 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001328 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001329 mingap = next - end;
1330 }
1331 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001332
1333 if (offset == RAM_ADDR_MAX) {
1334 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1335 (uint64_t)size);
1336 abort();
1337 }
1338
Alex Williamson04b16652010-07-02 11:13:17 -06001339 return offset;
1340}
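/*
 * Illustrative example (editorial sketch, not part of the original file):
 * with existing blocks occupying [0x0, 0x4000) and [0x8000, 0xc000),
 * find_ram_offset(0x2000) considers two candidate gaps, [0x4000, 0x8000)
 * and everything above 0xc000, and returns 0x4000 because it prefers the
 * smallest gap that still fits the request (a best-fit policy).
 */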
1341
Juan Quintela652d7ec2012-07-20 10:37:54 +02001342ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001343{
Alex Williamsond17b5282010-06-25 11:08:38 -06001344 RAMBlock *block;
1345 ram_addr_t last = 0;
1346
Mike Day0dc3f442013-09-05 14:41:35 -04001347 rcu_read_lock();
1348 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001349 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001350 }
Mike Day0dc3f442013-09-05 14:41:35 -04001351 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001352 return last;
1353}
1354
Jason Baronddb97f12012-08-02 15:44:16 -04001355static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1356{
1357 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001358
1359 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001360 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001361 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1362 if (ret) {
1363 perror("qemu_madvise");
1364 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1365 "but dump_guest_core=off specified\n");
1366 }
1367 }
1368}
1369
Mike Day0dc3f442013-09-05 14:41:35 -04001370/* Called within an RCU critical section, or while the ramlist lock
1371 * is held.
1372 */
Hu Tao20cfe882014-04-02 15:13:26 +08001373static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001374{
Hu Tao20cfe882014-04-02 15:13:26 +08001375 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001376
Mike Day0dc3f442013-09-05 14:41:35 -04001377 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001378 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001379 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001380 }
1381 }
Hu Tao20cfe882014-04-02 15:13:26 +08001382
1383 return NULL;
1384}
1385
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001386const char *qemu_ram_get_idstr(RAMBlock *rb)
1387{
1388 return rb->idstr;
1389}
1390
Mike Dayae3a7042013-09-05 14:41:35 -04001391/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001392void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1393{
Mike Dayae3a7042013-09-05 14:41:35 -04001394 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001395
Mike Day0dc3f442013-09-05 14:41:35 -04001396 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001397 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001398 assert(new_block);
1399 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001400
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001401 if (dev) {
1402 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001403 if (id) {
1404 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001405 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001406 }
1407 }
1408 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1409
Mike Day0dc3f442013-09-05 14:41:35 -04001410 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001411 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001412 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1413 new_block->idstr);
1414 abort();
1415 }
1416 }
Mike Day0dc3f442013-09-05 14:41:35 -04001417 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001418}
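/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a caller might name a freshly allocated block so it can be matched up
 * again on the migration destination.  The example_* name and the
 * "example.vram" string are assumptions for illustration only.
 */
static void example_name_ram(ram_addr_t addr, DeviceState *dev)
{
    /* The name gets prefixed with the device's qdev path when dev is non-NULL. */
    qemu_ram_set_idstr(addr, "example.vram", dev);
}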
1419
Mike Dayae3a7042013-09-05 14:41:35 -04001420/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001421void qemu_ram_unset_idstr(ram_addr_t addr)
1422{
Mike Dayae3a7042013-09-05 14:41:35 -04001423 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001424
Mike Dayae3a7042013-09-05 14:41:35 -04001425 /* FIXME: arch_init.c assumes that this is not called throughout
1426 * migration. Ignore the problem since hot-unplug during migration
1427 * does not work anyway.
1428 */
1429
Mike Day0dc3f442013-09-05 14:41:35 -04001430 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001431 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001432 if (block) {
1433 memset(block->idstr, 0, sizeof(block->idstr));
1434 }
Mike Day0dc3f442013-09-05 14:41:35 -04001435 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001436}
1437
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001438static int memory_try_enable_merging(void *addr, size_t len)
1439{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001440 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001441 /* disabled by the user */
1442 return 0;
1443 }
1444
1445 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1446}
1447
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001448/* Only legal before guest might have detected the memory size: e.g. on
1449 * incoming migration, or right after reset.
1450 *
1451 * As the memory core doesn't know how memory is accessed, it is up to the
1452 * resize callback to update device state and/or add assertions to detect
1453 * misuse, if necessary.
1454 */
1455int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1456{
1457 RAMBlock *block = find_ram_block(base);
1458
1459 assert(block);
1460
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001461 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001462
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001463 if (block->used_length == newsize) {
1464 return 0;
1465 }
1466
1467 if (!(block->flags & RAM_RESIZEABLE)) {
1468 error_setg_errno(errp, EINVAL,
1469 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1470 " in != 0x" RAM_ADDR_FMT, block->idstr,
1471 newsize, block->used_length);
1472 return -EINVAL;
1473 }
1474
1475 if (block->max_length < newsize) {
1476 error_setg_errno(errp, EINVAL,
1477 "Length too large: %s: 0x" RAM_ADDR_FMT
1478 " > 0x" RAM_ADDR_FMT, block->idstr,
1479 newsize, block->max_length);
1480 return -EINVAL;
1481 }
1482
1483 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1484 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001485 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1486 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001487 memory_region_set_size(block->mr, newsize);
1488 if (block->resized) {
1489 block->resized(block->idstr, newsize, block->host);
1490 }
1491 return 0;
1492}
1493
Hu Taoef701d72014-09-09 13:27:54 +08001494static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001495{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001496 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001497 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001498 ram_addr_t old_ram_size, new_ram_size;
1499
1500 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001501
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001502 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001503 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001504
1505 if (!new_block->host) {
1506 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001507 xen_ram_alloc(new_block->offset, new_block->max_length,
1508 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001509 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001510 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001511 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001512 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001513 error_setg_errno(errp, errno,
1514 "cannot set up guest memory '%s'",
1515 memory_region_name(new_block->mr));
1516 qemu_mutex_unlock_ramlist();
1517 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001518 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001519 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001520 }
1521 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001522
Li Zhijiandd631692015-07-02 20:18:06 +08001523 new_ram_size = MAX(old_ram_size,
1524 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1525 if (new_ram_size > old_ram_size) {
1526 migration_bitmap_extend(old_ram_size, new_ram_size);
1527 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001528 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1529 * QLIST (which has an RCU-friendly variant) does not have insertion at
1530 * tail, so save the last element in last_block.
1531 */
Mike Day0dc3f442013-09-05 14:41:35 -04001532 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001533 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001534 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001535 break;
1536 }
1537 }
1538 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001539 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001540 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001541 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001542 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001543 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001544 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001545 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001546
Mike Day0dc3f442013-09-05 14:41:35 -04001547 /* Write list before version */
1548 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001549 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001550 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001551
Juan Quintela2152f5c2013-10-08 13:52:02 +02001552 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1553
1554 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001555 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001556
1557 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001558 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1559 ram_list.dirty_memory[i] =
1560 bitmap_zero_extend(ram_list.dirty_memory[i],
1561 old_ram_size, new_ram_size);
1562 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001563 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001564 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001565 new_block->used_length,
1566 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001567
Paolo Bonzinia904c912015-01-21 16:18:35 +01001568 if (new_block->host) {
1569 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1570 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1571 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1572 if (kvm_enabled()) {
1573 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1574 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001575 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001576
1577 return new_block->offset;
1578}
1579
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001580#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001582 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001583 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001584{
1585 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001586 ram_addr_t addr;
1587 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001588
1589 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001590 error_setg(errp, "-mem-path not supported with Xen");
1591 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001592 }
1593
1594 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1595 /*
1596 * file_ram_alloc() needs to allocate just like
1597 * phys_mem_alloc, but we haven't bothered to provide
1598 * a hook there.
1599 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001600 error_setg(errp,
1601 "-mem-path not supported with this accelerator");
1602 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001603 }
1604
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001605 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001606 new_block = g_malloc0(sizeof(*new_block));
1607 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001608 new_block->used_length = size;
1609 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001610 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001611 new_block->host = file_ram_alloc(new_block, size,
1612 mem_path, errp);
1613 if (!new_block->host) {
1614 g_free(new_block);
1615 return -1;
1616 }
1617
Hu Taoef701d72014-09-09 13:27:54 +08001618 addr = ram_block_add(new_block, &local_err);
1619 if (local_err) {
1620 g_free(new_block);
1621 error_propagate(errp, local_err);
1622 return -1;
1623 }
1624 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001625}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001626#endif
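/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * backing guest RAM with a hugetlbfs file through qemu_ram_alloc_from_file().
 * The example_* name, the 1 GiB size and the "/dev/hugepages" path are
 * assumptions for illustration only.
 */
#ifdef __linux__
static ram_addr_t example_alloc_hugepage_backed(MemoryRegion *mr, Error **errp)
{
    /* share=true maps the backing file MAP_SHARED so that other processes
     * can map the same pages. */
    return qemu_ram_alloc_from_file(1024 * 1024 * 1024, mr,
                                    true, "/dev/hugepages", errp);
}
#endif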
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001627
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001628static
1629ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1630 void (*resized)(const char*,
1631 uint64_t length,
1632 void *host),
1633 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001634 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001635{
1636 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001637 ram_addr_t addr;
1638 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001639
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001640 size = HOST_PAGE_ALIGN(size);
1641 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001642 new_block = g_malloc0(sizeof(*new_block));
1643 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001644 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001645 new_block->used_length = size;
1646 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001647 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001648 new_block->fd = -1;
1649 new_block->host = host;
1650 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001651 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001652 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001653 if (resizeable) {
1654 new_block->flags |= RAM_RESIZEABLE;
1655 }
Hu Taoef701d72014-09-09 13:27:54 +08001656 addr = ram_block_add(new_block, &local_err);
1657 if (local_err) {
1658 g_free(new_block);
1659 error_propagate(errp, local_err);
1660 return -1;
1661 }
1662 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001663}
1664
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001665ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1666 MemoryRegion *mr, Error **errp)
1667{
1668 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1669}
1670
Hu Taoef701d72014-09-09 13:27:54 +08001671ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001672{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001673 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1674}
1675
1676ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1677 void (*resized)(const char*,
1678 uint64_t length,
1679 void *host),
1680 MemoryRegion *mr, Error **errp)
1681{
1682 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001683}
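/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * pairing qemu_ram_alloc_resizeable() with qemu_ram_resize().  The example_*
 * names, the 16 MiB/64 MiB/32 MiB sizes and the empty resize callback are
 * assumptions for illustration only.
 */
static void example_resized(const char *idstr, uint64_t new_len, void *host)
{
    /* Device-specific reaction to the new used_length, e.g. updating a
     * size register exposed to the guest. */
}

static int example_grow_ram(MemoryRegion *mr, Error **errp)
{
    ram_addr_t base = qemu_ram_alloc_resizeable(16 * 1024 * 1024,
                                                64 * 1024 * 1024,
                                                example_resized, mr, errp);

    if (base == (ram_addr_t)-1) {
        return -1;
    }

    /* Later, but before the guest has seen the size (e.g. on incoming
     * migration), the used length may be grown up to max_length. */
    return qemu_ram_resize(base, 32 * 1024 * 1024, errp);
}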
bellarde9a1ab12007-02-08 23:08:38 +00001684
Paolo Bonzini43771532013-09-09 17:58:40 +02001685static void reclaim_ramblock(RAMBlock *block)
1686{
1687 if (block->flags & RAM_PREALLOC) {
1688 ;
1689 } else if (xen_enabled()) {
1690 xen_invalidate_map_cache_entry(block->host);
1691#ifndef _WIN32
1692 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001693 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001694 close(block->fd);
1695#endif
1696 } else {
1697 qemu_anon_ram_free(block->host, block->max_length);
1698 }
1699 g_free(block);
1700}
1701
Anthony Liguoric227f092009-10-01 16:12:16 -05001702void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001703{
Alex Williamson04b16652010-07-02 11:13:17 -06001704 RAMBlock *block;
1705
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001706 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001707 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001708 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001709 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001710 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001711 /* Write list before version */
1712 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001713 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001714 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001715 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001716 }
1717 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001718 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001719}
1720
Huang Yingcd19cfa2011-03-02 08:56:19 +01001721#ifndef _WIN32
1722void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1723{
1724 RAMBlock *block;
1725 ram_addr_t offset;
1726 int flags;
1727 void *area, *vaddr;
1728
Mike Day0dc3f442013-09-05 14:41:35 -04001729 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001730 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001731 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001732 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001733 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001734 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001735 } else if (xen_enabled()) {
1736 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001737 } else {
1738 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001739 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001740 flags |= (block->flags & RAM_SHARED ?
1741 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001742 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1743 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001744 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001745 /*
1746 * Remap needs to match alloc. Accelerators that
1747 * set phys_mem_alloc never remap. If they did,
1748 * we'd need a remap hook here.
1749 */
1750 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1751
Huang Yingcd19cfa2011-03-02 08:56:19 +01001752 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1753 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1754 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001755 }
1756 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001757 fprintf(stderr, "Could not remap addr: "
1758 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001759 length, addr);
1760 exit(1);
1761 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001762 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001763 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001764 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001765 }
1766 }
1767}
1768#endif /* !_WIN32 */
1769
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001770int qemu_get_ram_fd(ram_addr_t addr)
1771{
Mike Dayae3a7042013-09-05 14:41:35 -04001772 RAMBlock *block;
1773 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001774
Mike Day0dc3f442013-09-05 14:41:35 -04001775 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001776 block = qemu_get_ram_block(addr);
1777 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001778 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001779 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001780}
1781
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001782void qemu_set_ram_fd(ram_addr_t addr, int fd)
1783{
1784 RAMBlock *block;
1785
1786 rcu_read_lock();
1787 block = qemu_get_ram_block(addr);
1788 block->fd = fd;
1789 rcu_read_unlock();
1790}
1791
Damjan Marion3fd74b82014-06-26 23:01:32 +02001792void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1793{
Mike Dayae3a7042013-09-05 14:41:35 -04001794 RAMBlock *block;
1795 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001796
Mike Day0dc3f442013-09-05 14:41:35 -04001797 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001798 block = qemu_get_ram_block(addr);
1799 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001800 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001801 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001802}
1803
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001804/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001805 * This should not be used for general purpose DMA. Use address_space_map
1806 * or address_space_rw instead. For local memory (e.g. video ram) that the
1807 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001808 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001809 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001810 */
1811void *qemu_get_ram_ptr(ram_addr_t addr)
1812{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001813 RAMBlock *block = qemu_get_ram_block(addr);
Mike Dayae3a7042013-09-05 14:41:35 -04001814
1815 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001816 /* We need to check if the requested address is in the RAM
1817 * because we don't want to map the entire memory in QEMU.
1818 * In that case just map until the end of the page.
1819 */
1820 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001821 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001822 }
Mike Dayae3a7042013-09-05 14:41:35 -04001823
1824 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001825 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001826 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001827}
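/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * reading one byte of guest RAM through qemu_get_ram_ptr() from inside an
 * RCU critical section.  example_peek_byte is an assumed helper; general
 * purpose accesses should go through the address_space_* API instead.
 */
static uint8_t example_peek_byte(ram_addr_t addr)
{
    uint8_t val;

    rcu_read_lock();
    val = ldub_p(qemu_get_ram_ptr(addr));
    rcu_read_unlock();

    return val;
}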
1828
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001829/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001830 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001831 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001832 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001833 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001834static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001835{
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001836 RAMBlock *block;
1837 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001838 if (*size == 0) {
1839 return NULL;
1840 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001841
1842 block = qemu_get_ram_block(addr);
1843 offset_inside_block = addr - block->offset;
1844 *size = MIN(*size, block->max_length - offset_inside_block);
1845
1846 if (xen_enabled() && block->host == NULL) {
1847 /* We need to check if the requested address is in the RAM
1848 * because we don't want to map the entire memory in QEMU.
1849 * In that case just map the requested area.
1850 */
1851 if (block->offset == 0) {
1852 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001853 }
1854
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001855 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001856 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001857
1858 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001859}
1860
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001861/*
1862 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1863 * in that RAMBlock.
1864 *
1865 * ptr: Host pointer to look up
1866 * round_offset: If true round the result offset down to a page boundary
1867 * *ram_addr: set to result ram_addr
1868 * *offset: set to result offset within the RAMBlock
1869 *
1870 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001871 *
1872 * By the time this function returns, the returned pointer is not protected
1873 * by RCU anymore. If the caller is not within an RCU critical section and
1874 * does not hold the iothread lock, it must have other means of protecting the
1875 * pointer, such as a reference to the region that includes the incoming
1876 * ram_addr_t.
1877 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001878RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1879 ram_addr_t *ram_addr,
1880 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001881{
pbrook94a6b542009-04-11 17:15:54 +00001882 RAMBlock *block;
1883 uint8_t *host = ptr;
1884
Jan Kiszka868bb332011-06-21 22:59:09 +02001885 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001886 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001887 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001888 block = qemu_get_ram_block(*ram_addr);
1889 if (block) {
1890 *offset = (host - block->host);
1891 }
Mike Day0dc3f442013-09-05 14:41:35 -04001892 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001893 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001894 }
1895
Mike Day0dc3f442013-09-05 14:41:35 -04001896 rcu_read_lock();
1897 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001898 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001899 goto found;
1900 }
1901
Mike Day0dc3f442013-09-05 14:41:35 -04001902 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001903 /* This case happens when the block is not mapped. */
1904 if (block->host == NULL) {
1905 continue;
1906 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001907 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001908 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001909 }
pbrook94a6b542009-04-11 17:15:54 +00001910 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001911
Mike Day0dc3f442013-09-05 14:41:35 -04001912 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001913 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001914
1915found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001916 *offset = (host - block->host);
1917 if (round_offset) {
1918 *offset &= TARGET_PAGE_MASK;
1919 }
1920 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001921 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001922 return block;
1923}
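/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * mapping a host pointer back to its RAMBlock and offset.  example_where_is
 * and the printed format are assumptions for illustration only.
 */
static void example_where_is(void *host)
{
    ram_addr_t ram_addr, offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, &ram_addr, &offset);

    if (rb) {
        printf("host %p -> block \"%s\" + 0x" RAM_ADDR_FMT "\n",
               host, qemu_ram_get_idstr(rb), offset);
    }
}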
1924
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001925/*
1926 * Finds the named RAMBlock
1927 *
1928 * name: The name of RAMBlock to find
1929 *
1930 * Returns: RAMBlock (or NULL if not found)
1931 */
1932RAMBlock *qemu_ram_block_by_name(const char *name)
1933{
1934 RAMBlock *block;
1935
1936 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1937 if (!strcmp(name, block->idstr)) {
1938 return block;
1939 }
1940 }
1941
1942 return NULL;
1943}
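/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * checking whether a block with a given idstr exists.  example_block_exists
 * is an assumed helper; the RCU read lock protects the list walk.
 */
static bool example_block_exists(const char *name)
{
    bool found;

    rcu_read_lock();
    found = qemu_ram_block_by_name(name) != NULL;
    rcu_read_unlock();

    return found;
}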
1944
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001945/* Some of the softmmu routines need to translate from a host pointer
1946 (typically a TLB entry) back to a ram offset. */
1947MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1948{
1949 RAMBlock *block;
1950 ram_addr_t offset; /* Not used */
1951
1952 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1953
1954 if (!block) {
1955 return NULL;
1956 }
1957
1958 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001959}
Alex Williamsonf471a172010-06-11 11:11:42 -06001960
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001961/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001962static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001963 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001964{
Juan Quintela52159192013-10-08 12:44:04 +02001965 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001966 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001967 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001968 switch (size) {
1969 case 1:
1970 stb_p(qemu_get_ram_ptr(ram_addr), val);
1971 break;
1972 case 2:
1973 stw_p(qemu_get_ram_ptr(ram_addr), val);
1974 break;
1975 case 4:
1976 stl_p(qemu_get_ram_ptr(ram_addr), val);
1977 break;
1978 default:
1979 abort();
1980 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001981 /* Set both VGA and migration bits for simplicity and to remove
1982 * the notdirty callback faster.
1983 */
1984 cpu_physical_memory_set_dirty_range(ram_addr, size,
1985 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001986 /* we remove the notdirty callback only if the code has been
1987 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001988 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001989 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001990 }
bellard1ccde1c2004-02-06 19:46:14 +00001991}
1992
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001993static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1994 unsigned size, bool is_write)
1995{
1996 return is_write;
1997}
1998
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001999static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002000 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002001 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002002 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002003};
2004
pbrook0f459d12008-06-09 00:20:13 +00002005/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002006static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002007{
Andreas Färber93afead2013-08-26 03:41:01 +02002008 CPUState *cpu = current_cpu;
2009 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002010 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002011 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002012 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002013 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002014
Andreas Färberff4700b2013-08-26 18:23:18 +02002015 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002016 /* We re-entered the check after replacing the TB. Now raise
2017 * the debug interrupt so that it will trigger after the
2018 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002019 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002020 return;
2021 }
Andreas Färber93afead2013-08-26 03:41:01 +02002022 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002023 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002024 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2025 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002026 if (flags == BP_MEM_READ) {
2027 wp->flags |= BP_WATCHPOINT_HIT_READ;
2028 } else {
2029 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2030 }
2031 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002032 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002033 if (!cpu->watchpoint_hit) {
2034 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002035 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002036 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002037 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002038 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002039 } else {
2040 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002041 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002042 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002043 }
aliguori06d55cc2008-11-18 20:24:06 +00002044 }
aliguori6e140f22008-11-18 20:37:55 +00002045 } else {
2046 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002047 }
2048 }
2049}
2050
pbrook6658ffb2007-03-16 23:58:11 +00002051/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2052 so these check for a hit then pass through to the normal out-of-line
2053 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002054static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2055 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002056{
Peter Maydell66b9b432015-04-26 16:49:24 +01002057 MemTxResult res;
2058 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002059 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2060 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002061
Peter Maydell66b9b432015-04-26 16:49:24 +01002062 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002063 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002064 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002065 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002066 break;
2067 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002068 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002069 break;
2070 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002071 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002072 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002073 default: abort();
2074 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002075 *pdata = data;
2076 return res;
2077}
2078
2079static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2080 uint64_t val, unsigned size,
2081 MemTxAttrs attrs)
2082{
2083 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002084 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2085 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002086
2087 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2088 switch (size) {
2089 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002090 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002091 break;
2092 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002093 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002094 break;
2095 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002096 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002097 break;
2098 default: abort();
2099 }
2100 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002101}
2102
Avi Kivity1ec9b902012-01-02 12:47:48 +02002103static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002104 .read_with_attrs = watch_mem_read,
2105 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002106 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002107};
pbrook6658ffb2007-03-16 23:58:11 +00002108
Peter Maydellf25a49e2015-04-26 16:49:24 +01002109static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2110 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002111{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002112 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002113 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002114 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002115
blueswir1db7b5422007-05-26 17:36:03 +00002116#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002117 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002118 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002119#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002120 res = address_space_read(subpage->as, addr + subpage->base,
2121 attrs, buf, len);
2122 if (res) {
2123 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002124 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002125 switch (len) {
2126 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002127 *data = ldub_p(buf);
2128 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002129 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002130 *data = lduw_p(buf);
2131 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002132 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002133 *data = ldl_p(buf);
2134 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002135 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002136 *data = ldq_p(buf);
2137 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002138 default:
2139 abort();
2140 }
blueswir1db7b5422007-05-26 17:36:03 +00002141}
2142
Peter Maydellf25a49e2015-04-26 16:49:24 +01002143static MemTxResult subpage_write(void *opaque, hwaddr addr,
2144 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002145{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002146 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002147 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002148
blueswir1db7b5422007-05-26 17:36:03 +00002149#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002150 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002151 " value %"PRIx64"\n",
2152 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002153#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002154 switch (len) {
2155 case 1:
2156 stb_p(buf, value);
2157 break;
2158 case 2:
2159 stw_p(buf, value);
2160 break;
2161 case 4:
2162 stl_p(buf, value);
2163 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002164 case 8:
2165 stq_p(buf, value);
2166 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002167 default:
2168 abort();
2169 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002170 return address_space_write(subpage->as, addr + subpage->base,
2171 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002172}
2173
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002174static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002175 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002176{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002177 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002178#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002179 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002180 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002181#endif
2182
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002183 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002184 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002185}
2186
Avi Kivity70c68e42012-01-02 12:32:48 +02002187static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002188 .read_with_attrs = subpage_read,
2189 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002190 .impl.min_access_size = 1,
2191 .impl.max_access_size = 8,
2192 .valid.min_access_size = 1,
2193 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002194 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002195 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002196};
2197
Anthony Liguoric227f092009-10-01 16:12:16 -05002198static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002199 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002200{
2201 int idx, eidx;
2202
2203 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2204 return -1;
2205 idx = SUBPAGE_IDX(start);
2206 eidx = SUBPAGE_IDX(end);
2207#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002208 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2209 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002210#endif
blueswir1db7b5422007-05-26 17:36:03 +00002211 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002212 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002213 }
2214
2215 return 0;
2216}
2217
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002218static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002219{
Anthony Liguoric227f092009-10-01 16:12:16 -05002220 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002221
Anthony Liguori7267c092011-08-20 22:09:37 -05002222 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002223
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002224 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002225 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002226 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002227 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002228 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002229#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002230 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2231 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002232#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002233 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002234
2235 return mmio;
2236}
2237
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002238static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2239 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002240{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002241 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002242 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002243 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002244 .mr = mr,
2245 .offset_within_address_space = 0,
2246 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002247 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002248 };
2249
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002250 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002251}
2252
Peter Maydella54c87b2016-01-21 14:15:05 +00002253MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002254{
Peter Maydella54c87b2016-01-21 14:15:05 +00002255 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2256 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002257 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002258 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002259
2260 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002261}
2262
Avi Kivitye9179ce2009-06-14 11:38:52 +03002263static void io_mem_init(void)
2264{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002265 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002266 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002267 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002268 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002269 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002270 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002271 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002272}
2273
Avi Kivityac1970f2012-10-03 16:22:53 +02002274static void mem_begin(MemoryListener *listener)
2275{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002276 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002277 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2278 uint16_t n;
2279
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002280 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002281 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002282 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002283 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002284 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002285 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002286 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002287 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002288
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002289 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002290 d->as = as;
2291 as->next_dispatch = d;
2292}
2293
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002294static void address_space_dispatch_free(AddressSpaceDispatch *d)
2295{
2296 phys_sections_free(&d->map);
2297 g_free(d);
2298}
2299
Paolo Bonzini00752702013-05-29 12:13:54 +02002300static void mem_commit(MemoryListener *listener)
2301{
2302 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002303 AddressSpaceDispatch *cur = as->dispatch;
2304 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002305
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002306 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002307
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002308 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002309 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002310 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002311 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002312}
2313
Avi Kivity1d711482012-10-02 18:54:45 +02002314static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002315{
Peter Maydell32857f42015-10-01 15:29:50 +01002316 CPUAddressSpace *cpuas;
2317 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002318
 2319 /* Since each CPU stores RAM addresses in its TLB cache, we must
2320 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002321 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2322 cpu_reloading_memory_map();
2323 /* The CPU and TLB are protected by the iothread lock.
2324 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2325 * may have split the RCU critical section.
2326 */
2327 d = atomic_rcu_read(&cpuas->as->dispatch);
2328 cpuas->memory_dispatch = d;
2329 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002330}
2331
Avi Kivityac1970f2012-10-03 16:22:53 +02002332void address_space_init_dispatch(AddressSpace *as)
2333{
Paolo Bonzini00752702013-05-29 12:13:54 +02002334 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002335 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002336 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002337 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002338 .region_add = mem_add,
2339 .region_nop = mem_add,
2340 .priority = 0,
2341 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002342 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002343}
2344
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002345void address_space_unregister(AddressSpace *as)
2346{
2347 memory_listener_unregister(&as->dispatch_listener);
2348}
2349
Avi Kivity83f3c252012-10-07 12:59:55 +02002350void address_space_destroy_dispatch(AddressSpace *as)
2351{
2352 AddressSpaceDispatch *d = as->dispatch;
2353
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002354 atomic_rcu_set(&as->dispatch, NULL);
2355 if (d) {
2356 call_rcu(d, address_space_dispatch_free, rcu);
2357 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002358}
2359
Avi Kivity62152b82011-07-26 14:26:14 +03002360static void memory_map_init(void)
2361{
Anthony Liguori7267c092011-08-20 22:09:37 -05002362 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002363
Paolo Bonzini57271d62013-11-07 17:14:37 +01002364 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002365 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002366
Anthony Liguori7267c092011-08-20 22:09:37 -05002367 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002368 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2369 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002370 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002371}
2372
2373MemoryRegion *get_system_memory(void)
2374{
2375 return system_memory;
2376}
2377
Avi Kivity309cb472011-08-08 16:09:03 +03002378MemoryRegion *get_system_io(void)
2379{
2380 return system_io;
2381}
2382
pbrooke2eef172008-06-08 01:09:01 +00002383#endif /* !defined(CONFIG_USER_ONLY) */
2384
bellard13eb76e2004-01-24 15:23:36 +00002385/* physical memory access (slow version, mainly for debug) */
2386#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002387int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002388 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002389{
2390 int l, flags;
2391 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002392 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002393
2394 while (len > 0) {
2395 page = addr & TARGET_PAGE_MASK;
2396 l = (page + TARGET_PAGE_SIZE) - addr;
2397 if (l > len)
2398 l = len;
2399 flags = page_get_flags(page);
2400 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002401 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002402 if (is_write) {
2403 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002404 return -1;
bellard579a97f2007-11-11 14:26:47 +00002405 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002406 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002407 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002408 memcpy(p, buf, l);
2409 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002410 } else {
2411 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002412 return -1;
bellard579a97f2007-11-11 14:26:47 +00002413 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002414 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002415 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002416 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002417 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002418 }
2419 len -= l;
2420 buf += l;
2421 addr += l;
2422 }
Paul Brooka68fe892010-03-01 00:08:59 +00002423 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002424}
bellard8df1cd02005-01-28 22:37:22 +00002425
bellard13eb76e2004-01-24 15:23:36 +00002426#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002427
Paolo Bonzini845b6212015-03-23 11:45:53 +01002428static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002429 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002430{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002431 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2432 /* No early return if dirty_log_mask is or becomes 0, because
2433 * cpu_physical_memory_set_dirty_range will still call
2434 * xen_modified_memory.
2435 */
2436 if (dirty_log_mask) {
2437 dirty_log_mask =
2438 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002439 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002440 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2441 tb_invalidate_phys_range(addr, addr + length);
2442 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2443 }
2444 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002445}
2446
Richard Henderson23326162013-07-08 14:55:59 -07002447static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002448{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002449 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002450
2451 /* Regions are assumed to support 1-4 byte accesses unless
2452 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002453 if (access_size_max == 0) {
2454 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002455 }
Richard Henderson23326162013-07-08 14:55:59 -07002456
2457 /* Bound the maximum access by the alignment of the address. */
2458 if (!mr->ops->impl.unaligned) {
2459 unsigned align_size_max = addr & -addr;
2460 if (align_size_max != 0 && align_size_max < access_size_max) {
2461 access_size_max = align_size_max;
2462 }
2463 }
2464
2465 /* Don't attempt accesses larger than the maximum. */
2466 if (l > access_size_max) {
2467 l = access_size_max;
2468 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002469 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002470
2471 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002472}
2473
Jan Kiszka4840f102015-06-18 18:47:22 +02002474static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002475{
Jan Kiszka4840f102015-06-18 18:47:22 +02002476 bool unlocked = !qemu_mutex_iothread_locked();
2477 bool release_lock = false;
2478
2479 if (unlocked && mr->global_locking) {
2480 qemu_mutex_lock_iothread();
2481 unlocked = false;
2482 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002483 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002484 if (mr->flush_coalesced_mmio) {
2485 if (unlocked) {
2486 qemu_mutex_lock_iothread();
2487 }
2488 qemu_flush_coalesced_mmio_buffer();
2489 if (unlocked) {
2490 qemu_mutex_unlock_iothread();
2491 }
2492 }
2493
2494 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002495}
2496
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002497/* Called within RCU critical section. */
2498static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2499 MemTxAttrs attrs,
2500 const uint8_t *buf,
2501 int len, hwaddr addr1,
2502 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002503{
bellard13eb76e2004-01-24 15:23:36 +00002504 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002505 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002506 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002507 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002508
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002509 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002510 if (!memory_access_is_direct(mr, true)) {
2511 release_lock |= prepare_mmio_access(mr);
2512 l = memory_access_size(mr, l, addr1);
2513 /* XXX: could force current_cpu to NULL to avoid
2514 potential bugs */
2515 switch (l) {
2516 case 8:
2517 /* 64 bit write access */
2518 val = ldq_p(buf);
2519 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2520 attrs);
2521 break;
2522 case 4:
2523 /* 32 bit write access */
2524 val = ldl_p(buf);
2525 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2526 attrs);
2527 break;
2528 case 2:
2529 /* 16 bit write access */
2530 val = lduw_p(buf);
2531 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2532 attrs);
2533 break;
2534 case 1:
2535 /* 8 bit write access */
2536 val = ldub_p(buf);
2537 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2538 attrs);
2539 break;
2540 default:
2541 abort();
bellard13eb76e2004-01-24 15:23:36 +00002542 }
2543 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002544 addr1 += memory_region_get_ram_addr(mr);
2545 /* RAM case */
2546 ptr = qemu_get_ram_ptr(addr1);
2547 memcpy(ptr, buf, l);
2548 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002549 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002550
2551 if (release_lock) {
2552 qemu_mutex_unlock_iothread();
2553 release_lock = false;
2554 }
2555
bellard13eb76e2004-01-24 15:23:36 +00002556 len -= l;
2557 buf += l;
2558 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002559
2560 if (!len) {
2561 break;
2562 }
2563
2564 l = len;
2565 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002566 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002567
Peter Maydell3b643492015-04-26 16:49:23 +01002568 return result;
bellard13eb76e2004-01-24 15:23:36 +00002569}
bellard8df1cd02005-01-28 22:37:22 +00002570
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002571MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2572 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002573{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002574 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002575 hwaddr addr1;
2576 MemoryRegion *mr;
2577 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002578
2579 if (len > 0) {
2580 rcu_read_lock();
2581 l = len;
2582 mr = address_space_translate(as, addr, &addr1, &l, true);
2583 result = address_space_write_continue(as, addr, attrs, buf, len,
2584 addr1, l, mr);
2585 rcu_read_unlock();
2586 }
2587
2588 return result;
2589}
2590
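/* Illustrative sketch: a caller that pushes a host buffer into guest memory
 * with address_space_write() and checks the MemTxResult instead of ignoring
 * it.  The example_* helper and its parameters are hypothetical.
 */
static bool example_write_guest_buffer(AddressSpace *as, hwaddr gpa,
                                       const uint8_t *data, int size)
{
    MemTxResult res = address_space_write(as, gpa, MEMTXATTRS_UNSPECIFIED,
                                          data, size);
    return res == MEMTX_OK;
}
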
2591/* Called within RCU critical section. */
2592MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2593 MemTxAttrs attrs, uint8_t *buf,
2594 int len, hwaddr addr1, hwaddr l,
2595 MemoryRegion *mr)
2596{
2597 uint8_t *ptr;
2598 uint64_t val;
2599 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002600 bool release_lock = false;
2601
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002602 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002603 if (!memory_access_is_direct(mr, false)) {
2604 /* I/O case */
2605 release_lock |= prepare_mmio_access(mr);
2606 l = memory_access_size(mr, l, addr1);
2607 switch (l) {
2608 case 8:
2609 /* 64 bit read access */
2610 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2611 attrs);
2612 stq_p(buf, val);
2613 break;
2614 case 4:
2615 /* 32 bit read access */
2616 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2617 attrs);
2618 stl_p(buf, val);
2619 break;
2620 case 2:
2621 /* 16 bit read access */
2622 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2623 attrs);
2624 stw_p(buf, val);
2625 break;
2626 case 1:
2627 /* 8 bit read access */
2628 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2629 attrs);
2630 stb_p(buf, val);
2631 break;
2632 default:
2633 abort();
2634 }
2635 } else {
2636 /* RAM case */
2637 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2638 memcpy(buf, ptr, l);
2639 }
2640
2641 if (release_lock) {
2642 qemu_mutex_unlock_iothread();
2643 release_lock = false;
2644 }
2645
2646 len -= l;
2647 buf += l;
2648 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002649
2650 if (!len) {
2651 break;
2652 }
2653
2654 l = len;
2655 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002656 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002657
2658 return result;
2659}
2660
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002661MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2662 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002663{
2664 hwaddr l;
2665 hwaddr addr1;
2666 MemoryRegion *mr;
2667 MemTxResult result = MEMTX_OK;
2668
2669 if (len > 0) {
2670 rcu_read_lock();
2671 l = len;
2672 mr = address_space_translate(as, addr, &addr1, &l, false);
2673 result = address_space_read_continue(as, addr, attrs, buf, len,
2674 addr1, l, mr);
2675 rcu_read_unlock();
2676 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002677
2678 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002679}
2680
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002681MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2682 uint8_t *buf, int len, bool is_write)
2683{
2684 if (is_write) {
2685 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2686 } else {
2687 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2688 }
2689}
Avi Kivityac1970f2012-10-03 16:22:53 +02002690
Avi Kivitya8170e52012-10-23 12:30:10 +02002691void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002692 int len, int is_write)
2693{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002694 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2695 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002696}
2697
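/* Illustrative sketch: the legacy helper above is a fire-and-forget wrapper
 * around address_space_rw() on address_space_memory with unspecified
 * attributes; a caller that cares about bus errors uses the attrs/result
 * flavour directly.  The example_* helper is hypothetical.
 */
static void example_copy_to_guest(hwaddr gpa, uint8_t *data, int size)
{
    /* Convenience form: no way to observe a failed transaction. */
    cpu_physical_memory_rw(gpa, data, size, 1);

    /* Same write, but with an explicit transaction result. */
    if (address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
                         data, size, true) != MEMTX_OK) {
        /* handle the failed bus transaction here */
    }
}
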
Alexander Graf582b55a2013-12-11 14:17:44 +01002698enum write_rom_type {
2699 WRITE_DATA,
2700 FLUSH_CACHE,
2701};
2702
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002703static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002704 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002705{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002706 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002707 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002708 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002709 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002710
Paolo Bonzini41063e12015-03-18 14:21:43 +01002711 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002712 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002713 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002714 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002715
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002716 if (!(memory_region_is_ram(mr) ||
2717 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002718 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002719 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002720 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002721 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002722 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002723 switch (type) {
2724 case WRITE_DATA:
2725 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002726 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002727 break;
2728 case FLUSH_CACHE:
2729 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2730 break;
2731 }
bellardd0ecd2a2006-04-23 17:14:48 +00002732 }
2733 len -= l;
2734 buf += l;
2735 addr += l;
2736 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002737 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002738}
2739
Alexander Graf582b55a2013-12-11 14:17:44 +01002740/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002741void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002742 const uint8_t *buf, int len)
2743{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002744 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002745}
2746
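/* Illustrative sketch: firmware loaders use cpu_physical_memory_write_rom()
 * rather than a plain write so that the data also lands in ROM-type regions.
 * The example_* helper and the 0xfffc0000 load address are hypothetical.
 */
static void example_load_firmware_blob(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, size);
}
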
2747void cpu_flush_icache_range(hwaddr start, int len)
2748{
2749 /*
2750 * This function should do the same thing as an icache flush that was
2751 * triggered from within the guest. For TCG we are always cache coherent,
2752 * so there is no need to flush anything. For KVM / Xen we need to flush
2753 * the host's instruction cache at least.
2754 */
2755 if (tcg_enabled()) {
2756 return;
2757 }
2758
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002759 cpu_physical_memory_write_rom_internal(&address_space_memory,
2760 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002761}
2762
aliguori6d16c2f2009-01-22 16:59:11 +00002763typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002764 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002765 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002766 hwaddr addr;
2767 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002768 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002769} BounceBuffer;
2770
2771static BounceBuffer bounce;
2772
aliguoriba223c22009-01-22 16:59:16 +00002773typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002774 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002775 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002776} MapClient;
2777
Fam Zheng38e047b2015-03-16 17:03:35 +08002778QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002779static QLIST_HEAD(map_client_list, MapClient) map_client_list
2780 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002781
Fam Zhenge95205e2015-03-16 17:03:37 +08002782static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002783{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002784 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002785 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002786}
2787
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002788static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002789{
2790 MapClient *client;
2791
Blue Swirl72cf2d42009-09-12 07:36:22 +00002792 while (!QLIST_EMPTY(&map_client_list)) {
2793 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002794 qemu_bh_schedule(client->bh);
2795 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002796 }
2797}
2798
Fam Zhenge95205e2015-03-16 17:03:37 +08002799void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002800{
2801 MapClient *client = g_malloc(sizeof(*client));
2802
Fam Zheng38e047b2015-03-16 17:03:35 +08002803 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002804 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002805 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002806 if (!atomic_read(&bounce.in_use)) {
2807 cpu_notify_map_clients_locked();
2808 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002809 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002810}
2811
Fam Zheng38e047b2015-03-16 17:03:35 +08002812void cpu_exec_init_all(void)
2813{
2814 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002815 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002816 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002817 qemu_mutex_init(&map_client_list_lock);
2818}
2819
Fam Zhenge95205e2015-03-16 17:03:37 +08002820void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002821{
Fam Zhenge95205e2015-03-16 17:03:37 +08002822 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002823
Fam Zhenge95205e2015-03-16 17:03:37 +08002824 qemu_mutex_lock(&map_client_list_lock);
2825 QLIST_FOREACH(client, &map_client_list, link) {
2826 if (client->bh == bh) {
2827 cpu_unregister_map_client_do(client);
2828 break;
2829 }
2830 }
2831 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002832}
2833
2834static void cpu_notify_map_clients(void)
2835{
Fam Zheng38e047b2015-03-16 17:03:35 +08002836 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002837 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002838 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002839}
2840
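/* Illustrative sketch: when address_space_map() fails because the single
 * bounce buffer is in use, a caller can register a bottom half that will be
 * scheduled once the buffer is released, and retry the mapping from there.
 * qemu_bh_new() is the usual main-loop helper for creating the bottom half;
 * the example_* names are hypothetical.
 */
static void example_retry_dma_bh(void *opaque)
{
    /* Re-issue the address_space_map() attempt for the pending request. */
}

static void example_wait_for_bounce_buffer(void *opaque)
{
    QEMUBH *bh = qemu_bh_new(example_retry_dma_bh, opaque);

    cpu_register_map_client(bh);
}
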
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002841bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2842{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002843 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002844 hwaddr l, xlat;
2845
Paolo Bonzini41063e12015-03-18 14:21:43 +01002846 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002847 while (len > 0) {
2848 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002849 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2850 if (!memory_access_is_direct(mr, is_write)) {
2851 l = memory_access_size(mr, l, addr);
2852 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002853 return false;
2854 }
2855 }
2856
2857 len -= l;
2858 addr += l;
2859 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002860 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002861 return true;
2862}
2863
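/* Illustrative sketch: probing a range with address_space_access_valid()
 * before committing to a transfer, so a device model can reject a DMA
 * descriptor that points at unassigned or non-writable space.  The
 * example_* helper is hypothetical.
 */
static bool example_dma_range_ok(AddressSpace *as, hwaddr gpa, int size)
{
    return address_space_access_valid(as, gpa, size, true);
}
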
aliguori6d16c2f2009-01-22 16:59:11 +00002864/* Map a physical memory region into a host virtual address.
2865 * May map a subset of the requested range, given by and returned in *plen.
2866 * May return NULL if resources needed to perform the mapping are exhausted.
2867 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002868 * Use cpu_register_map_client() to know when retrying the map operation is
2869 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002870 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002871void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002872 hwaddr addr,
2873 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002874 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002875{
Avi Kivitya8170e52012-10-23 12:30:10 +02002876 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002877 hwaddr done = 0;
2878 hwaddr l, xlat, base;
2879 MemoryRegion *mr, *this_mr;
2880 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002881 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002882
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002883 if (len == 0) {
2884 return NULL;
2885 }
aliguori6d16c2f2009-01-22 16:59:11 +00002886
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002887 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002888 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002889 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002890
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002891 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002892 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002893 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002894 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002895 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002896 /* Avoid unbounded allocations */
2897 l = MIN(l, TARGET_PAGE_SIZE);
2898 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002899 bounce.addr = addr;
2900 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002901
2902 memory_region_ref(mr);
2903 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002904 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002905 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2906 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002907 }
aliguori6d16c2f2009-01-22 16:59:11 +00002908
Paolo Bonzini41063e12015-03-18 14:21:43 +01002909 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002910 *plen = l;
2911 return bounce.buffer;
2912 }
2913
2914 base = xlat;
2915 raddr = memory_region_get_ram_addr(mr);
2916
2917 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002918 len -= l;
2919 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002920 done += l;
2921 if (len == 0) {
2922 break;
2923 }
2924
2925 l = len;
2926 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2927 if (this_mr != mr || xlat != base + done) {
2928 break;
2929 }
aliguori6d16c2f2009-01-22 16:59:11 +00002930 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002931
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002932 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002933 *plen = done;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002934 ptr = qemu_ram_ptr_length(raddr + base, plen);
2935 rcu_read_unlock();
2936
2937 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002938}
2939
Avi Kivityac1970f2012-10-03 16:22:53 +02002940/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002941 * Will also mark the memory as dirty if is_write == 1. access_len gives
2942 * the amount of memory that was actually read or written by the caller.
2943 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002944void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2945 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002946{
2947 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002948 MemoryRegion *mr;
2949 ram_addr_t addr1;
2950
2951 mr = qemu_ram_addr_from_host(buffer, &addr1);
2952 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002953 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002954 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002955 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002956 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002957 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002958 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002959 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002960 return;
2961 }
2962 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002963 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2964 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002965 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002966 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002967 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002968 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002969 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002970 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002971}
bellardd0ecd2a2006-04-23 17:14:48 +00002972
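/* Illustrative sketch: the canonical map/modify/unmap pattern.  Note that
 * *plen may come back smaller than requested, and that the access_len passed
 * to address_space_unmap() must reflect how much was actually touched.  The
 * example_* helper is hypothetical.
 */
static void example_zero_guest_range(AddressSpace *as, hwaddr gpa, hwaddr size)
{
    hwaddr len = size;
    void *host = address_space_map(as, gpa, &len, true);

    if (!host) {
        /* Resources exhausted; see cpu_register_map_client() above. */
        return;
    }
    memset(host, 0, len);
    address_space_unmap(as, host, len, 1, len);
}
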
Avi Kivitya8170e52012-10-23 12:30:10 +02002973void *cpu_physical_memory_map(hwaddr addr,
2974 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002975 int is_write)
2976{
2977 return address_space_map(&address_space_memory, addr, plen, is_write);
2978}
2979
Avi Kivitya8170e52012-10-23 12:30:10 +02002980void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2981 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002982{
2983 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2984}
2985
bellard8df1cd02005-01-28 22:37:22 +00002986/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002987static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2988 MemTxAttrs attrs,
2989 MemTxResult *result,
2990 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002991{
bellard8df1cd02005-01-28 22:37:22 +00002992 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002993 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002994 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002995 hwaddr l = 4;
2996 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002997 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002998 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002999
Paolo Bonzini41063e12015-03-18 14:21:43 +01003000 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003001 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003002 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003003 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003004
bellard8df1cd02005-01-28 22:37:22 +00003005 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003006 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003007#if defined(TARGET_WORDS_BIGENDIAN)
3008 if (endian == DEVICE_LITTLE_ENDIAN) {
3009 val = bswap32(val);
3010 }
3011#else
3012 if (endian == DEVICE_BIG_ENDIAN) {
3013 val = bswap32(val);
3014 }
3015#endif
bellard8df1cd02005-01-28 22:37:22 +00003016 } else {
3017 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003018 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003019 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003020 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003021 switch (endian) {
3022 case DEVICE_LITTLE_ENDIAN:
3023 val = ldl_le_p(ptr);
3024 break;
3025 case DEVICE_BIG_ENDIAN:
3026 val = ldl_be_p(ptr);
3027 break;
3028 default:
3029 val = ldl_p(ptr);
3030 break;
3031 }
Peter Maydell50013112015-04-26 16:49:24 +01003032 r = MEMTX_OK;
3033 }
3034 if (result) {
3035 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003036 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003037 if (release_lock) {
3038 qemu_mutex_unlock_iothread();
3039 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003040 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003041 return val;
3042}
3043
Peter Maydell50013112015-04-26 16:49:24 +01003044uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3045 MemTxAttrs attrs, MemTxResult *result)
3046{
3047 return address_space_ldl_internal(as, addr, attrs, result,
3048 DEVICE_NATIVE_ENDIAN);
3049}
3050
3051uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3052 MemTxAttrs attrs, MemTxResult *result)
3053{
3054 return address_space_ldl_internal(as, addr, attrs, result,
3055 DEVICE_LITTLE_ENDIAN);
3056}
3057
3058uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3059 MemTxAttrs attrs, MemTxResult *result)
3060{
3061 return address_space_ldl_internal(as, addr, attrs, result,
3062 DEVICE_BIG_ENDIAN);
3063}
3064
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003065uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003066{
Peter Maydell50013112015-04-26 16:49:24 +01003067 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003068}
3069
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003070uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003071{
Peter Maydell50013112015-04-26 16:49:24 +01003072 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003073}
3074
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003075uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003076{
Peter Maydell50013112015-04-26 16:49:24 +01003077 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003078}
3079
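/* Illustrative sketch: reading a 32-bit little-endian register with an
 * explicit transaction result, as opposed to the fire-and-forget
 * ldl_le_phys() wrapper above.  The example_* helper is hypothetical.
 */
static uint32_t example_read_mmio_reg(AddressSpace *as, hwaddr reg_addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, reg_addr, MEMTXATTRS_UNSPECIFIED,
                                        &res);

    return (res == MEMTX_OK) ? val : 0xffffffff;
}
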
bellard84b7b8e2005-11-28 21:19:04 +00003080/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003081static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3082 MemTxAttrs attrs,
3083 MemTxResult *result,
3084 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003085{
bellard84b7b8e2005-11-28 21:19:04 +00003086 uint8_t *ptr;
3087 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003088 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003089 hwaddr l = 8;
3090 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003091 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003092 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003093
Paolo Bonzini41063e12015-03-18 14:21:43 +01003094 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003095 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003096 false);
3097 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003098 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003099
bellard84b7b8e2005-11-28 21:19:04 +00003100 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003101 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003102#if defined(TARGET_WORDS_BIGENDIAN)
3103 if (endian == DEVICE_LITTLE_ENDIAN) {
3104 val = bswap64(val);
3105 }
3106#else
3107 if (endian == DEVICE_BIG_ENDIAN) {
3108 val = bswap64(val);
3109 }
3110#endif
bellard84b7b8e2005-11-28 21:19:04 +00003111 } else {
3112 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003113 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003114 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003115 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003116 switch (endian) {
3117 case DEVICE_LITTLE_ENDIAN:
3118 val = ldq_le_p(ptr);
3119 break;
3120 case DEVICE_BIG_ENDIAN:
3121 val = ldq_be_p(ptr);
3122 break;
3123 default:
3124 val = ldq_p(ptr);
3125 break;
3126 }
Peter Maydell50013112015-04-26 16:49:24 +01003127 r = MEMTX_OK;
3128 }
3129 if (result) {
3130 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003131 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003132 if (release_lock) {
3133 qemu_mutex_unlock_iothread();
3134 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003135 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003136 return val;
3137}
3138
Peter Maydell50013112015-04-26 16:49:24 +01003139uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3140 MemTxAttrs attrs, MemTxResult *result)
3141{
3142 return address_space_ldq_internal(as, addr, attrs, result,
3143 DEVICE_NATIVE_ENDIAN);
3144}
3145
3146uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3147 MemTxAttrs attrs, MemTxResult *result)
3148{
3149 return address_space_ldq_internal(as, addr, attrs, result,
3150 DEVICE_LITTLE_ENDIAN);
3151}
3152
3153uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3154 MemTxAttrs attrs, MemTxResult *result)
3155{
3156 return address_space_ldq_internal(as, addr, attrs, result,
3157 DEVICE_BIG_ENDIAN);
3158}
3159
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003160uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003161{
Peter Maydell50013112015-04-26 16:49:24 +01003162 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003163}
3164
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003165uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003166{
Peter Maydell50013112015-04-26 16:49:24 +01003167 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003168}
3169
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003170uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003171{
Peter Maydell50013112015-04-26 16:49:24 +01003172 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003173}
3174
bellardaab33092005-10-30 20:48:42 +00003175/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003176uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3177 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003178{
3179 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003180 MemTxResult r;
3181
3182 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3183 if (result) {
3184 *result = r;
3185 }
bellardaab33092005-10-30 20:48:42 +00003186 return val;
3187}
3188
Peter Maydell50013112015-04-26 16:49:24 +01003189uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3190{
3191 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3192}
3193
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003194/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003195static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3196 hwaddr addr,
3197 MemTxAttrs attrs,
3198 MemTxResult *result,
3199 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003200{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003201 uint8_t *ptr;
3202 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003203 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003204 hwaddr l = 2;
3205 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003206 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003207 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003208
Paolo Bonzini41063e12015-03-18 14:21:43 +01003209 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003210 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003211 false);
3212 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003213 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003214
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003215 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003216 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003217#if defined(TARGET_WORDS_BIGENDIAN)
3218 if (endian == DEVICE_LITTLE_ENDIAN) {
3219 val = bswap16(val);
3220 }
3221#else
3222 if (endian == DEVICE_BIG_ENDIAN) {
3223 val = bswap16(val);
3224 }
3225#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003226 } else {
3227 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003228 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003229 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003230 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003231 switch (endian) {
3232 case DEVICE_LITTLE_ENDIAN:
3233 val = lduw_le_p(ptr);
3234 break;
3235 case DEVICE_BIG_ENDIAN:
3236 val = lduw_be_p(ptr);
3237 break;
3238 default:
3239 val = lduw_p(ptr);
3240 break;
3241 }
Peter Maydell50013112015-04-26 16:49:24 +01003242 r = MEMTX_OK;
3243 }
3244 if (result) {
3245 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003246 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003247 if (release_lock) {
3248 qemu_mutex_unlock_iothread();
3249 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003250 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003251 return val;
bellardaab33092005-10-30 20:48:42 +00003252}
3253
Peter Maydell50013112015-04-26 16:49:24 +01003254uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3255 MemTxAttrs attrs, MemTxResult *result)
3256{
3257 return address_space_lduw_internal(as, addr, attrs, result,
3258 DEVICE_NATIVE_ENDIAN);
3259}
3260
3261uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3262 MemTxAttrs attrs, MemTxResult *result)
3263{
3264 return address_space_lduw_internal(as, addr, attrs, result,
3265 DEVICE_LITTLE_ENDIAN);
3266}
3267
3268uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3269 MemTxAttrs attrs, MemTxResult *result)
3270{
3271 return address_space_lduw_internal(as, addr, attrs, result,
3272 DEVICE_BIG_ENDIAN);
3273}
3274
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003275uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003276{
Peter Maydell50013112015-04-26 16:49:24 +01003277 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003278}
3279
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003280uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003281{
Peter Maydell50013112015-04-26 16:49:24 +01003282 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003283}
3284
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003285uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003286{
Peter Maydell50013112015-04-26 16:49:24 +01003287 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003288}
3289
bellard8df1cd02005-01-28 22:37:22 +00003290/* warning: addr must be aligned. The RAM page is not marked as dirty
 3291 and the code inside is not invalidated. This is useful if the dirty
 3292 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003293void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3294 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003295{
bellard8df1cd02005-01-28 22:37:22 +00003296 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003297 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003298 hwaddr l = 4;
3299 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003300 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003301 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003302 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003303
Paolo Bonzini41063e12015-03-18 14:21:43 +01003304 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003305 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003306 true);
3307 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003308 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003309
Peter Maydell50013112015-04-26 16:49:24 +01003310 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003311 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003312 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003313 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003314 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003315
Paolo Bonzini845b6212015-03-23 11:45:53 +01003316 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3317 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003318 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003319 r = MEMTX_OK;
3320 }
3321 if (result) {
3322 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003323 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003324 if (release_lock) {
3325 qemu_mutex_unlock_iothread();
3326 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003327 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003328}
3329
Peter Maydell50013112015-04-26 16:49:24 +01003330void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3331{
3332 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3333}
3334
bellard8df1cd02005-01-28 22:37:22 +00003335/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003336static inline void address_space_stl_internal(AddressSpace *as,
3337 hwaddr addr, uint32_t val,
3338 MemTxAttrs attrs,
3339 MemTxResult *result,
3340 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003341{
bellard8df1cd02005-01-28 22:37:22 +00003342 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003343 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003344 hwaddr l = 4;
3345 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003346 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003347 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003348
Paolo Bonzini41063e12015-03-18 14:21:43 +01003349 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003350 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003351 true);
3352 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003353 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003354
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003355#if defined(TARGET_WORDS_BIGENDIAN)
3356 if (endian == DEVICE_LITTLE_ENDIAN) {
3357 val = bswap32(val);
3358 }
3359#else
3360 if (endian == DEVICE_BIG_ENDIAN) {
3361 val = bswap32(val);
3362 }
3363#endif
Peter Maydell50013112015-04-26 16:49:24 +01003364 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003365 } else {
bellard8df1cd02005-01-28 22:37:22 +00003366 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003367 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003368 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003369 switch (endian) {
3370 case DEVICE_LITTLE_ENDIAN:
3371 stl_le_p(ptr, val);
3372 break;
3373 case DEVICE_BIG_ENDIAN:
3374 stl_be_p(ptr, val);
3375 break;
3376 default:
3377 stl_p(ptr, val);
3378 break;
3379 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003380 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003381 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003382 }
Peter Maydell50013112015-04-26 16:49:24 +01003383 if (result) {
3384 *result = r;
3385 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003386 if (release_lock) {
3387 qemu_mutex_unlock_iothread();
3388 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003389 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003390}
3391
3392void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3393 MemTxAttrs attrs, MemTxResult *result)
3394{
3395 address_space_stl_internal(as, addr, val, attrs, result,
3396 DEVICE_NATIVE_ENDIAN);
3397}
3398
3399void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3400 MemTxAttrs attrs, MemTxResult *result)
3401{
3402 address_space_stl_internal(as, addr, val, attrs, result,
3403 DEVICE_LITTLE_ENDIAN);
3404}
3405
3406void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3407 MemTxAttrs attrs, MemTxResult *result)
3408{
3409 address_space_stl_internal(as, addr, val, attrs, result,
3410 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003411}
3412
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003413void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003414{
Peter Maydell50013112015-04-26 16:49:24 +01003415 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003416}
3417
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003418void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003419{
Peter Maydell50013112015-04-26 16:49:24 +01003420 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003421}
3422
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003423void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003424{
Peter Maydell50013112015-04-26 16:49:24 +01003425 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003426}
3427
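/* Illustrative sketch: the store helpers mirror the loads above; a caller
 * that wants to observe bus errors uses the attrs/result form instead of
 * stl_le_phys().  The example_* helper is hypothetical.
 */
static bool example_write_mmio_reg(AddressSpace *as, hwaddr reg_addr,
                                   uint32_t val)
{
    MemTxResult res;

    address_space_stl_le(as, reg_addr, val, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
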
bellardaab33092005-10-30 20:48:42 +00003428/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003429void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3430 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003431{
3432 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003433 MemTxResult r;
3434
3435 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3436 if (result) {
3437 *result = r;
3438 }
3439}
3440
3441void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3442{
3443 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003444}
3445
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003446/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003447static inline void address_space_stw_internal(AddressSpace *as,
3448 hwaddr addr, uint32_t val,
3449 MemTxAttrs attrs,
3450 MemTxResult *result,
3451 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003452{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003453 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003454 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003455 hwaddr l = 2;
3456 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003457 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003458 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003459
Paolo Bonzini41063e12015-03-18 14:21:43 +01003460 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003461 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003462 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003463 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003464
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003465#if defined(TARGET_WORDS_BIGENDIAN)
3466 if (endian == DEVICE_LITTLE_ENDIAN) {
3467 val = bswap16(val);
3468 }
3469#else
3470 if (endian == DEVICE_BIG_ENDIAN) {
3471 val = bswap16(val);
3472 }
3473#endif
Peter Maydell50013112015-04-26 16:49:24 +01003474 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003475 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003476 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003477 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003478 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003479 switch (endian) {
3480 case DEVICE_LITTLE_ENDIAN:
3481 stw_le_p(ptr, val);
3482 break;
3483 case DEVICE_BIG_ENDIAN:
3484 stw_be_p(ptr, val);
3485 break;
3486 default:
3487 stw_p(ptr, val);
3488 break;
3489 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003490 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003491 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003492 }
Peter Maydell50013112015-04-26 16:49:24 +01003493 if (result) {
3494 *result = r;
3495 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003496 if (release_lock) {
3497 qemu_mutex_unlock_iothread();
3498 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003499 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003500}
3501
3502void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3503 MemTxAttrs attrs, MemTxResult *result)
3504{
3505 address_space_stw_internal(as, addr, val, attrs, result,
3506 DEVICE_NATIVE_ENDIAN);
3507}
3508
3509void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3510 MemTxAttrs attrs, MemTxResult *result)
3511{
3512 address_space_stw_internal(as, addr, val, attrs, result,
3513 DEVICE_LITTLE_ENDIAN);
3514}
3515
3516void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3517 MemTxAttrs attrs, MemTxResult *result)
3518{
3519 address_space_stw_internal(as, addr, val, attrs, result,
3520 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003521}
3522
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

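/* 64-bit stores.  Unlike the 16-bit helpers above, these byte-swap the
 * value in place and funnel it through address_space_rw(); hence the
 * XXX below.
 */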
/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

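    /* Copy at most one target page per iteration: translate the virtual
     * address with the debug hook and use the address space selected by
     * the returned transaction attributes.
     */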
    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big-endian machine.  Don't do this at home, kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
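/* Return true if the given physical address is backed by MMIO rather than
 * RAM or a ROM device.  Takes the RCU read lock around the translation.
 */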
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

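/* Walk every RAMBlock under the RCU read lock, invoking func on each;
 * stop and return func's value on the first non-zero return.
 */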
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif