/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
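
/* Worked example (added commentary, not part of the original file): assuming
 * a target with TARGET_PAGE_BITS == 12 (4 KiB pages), the page-index space
 * is 64 - 12 = 52 bits wide, so P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 * levels of 9 bits each, which covers 54 >= 52 bits.  PHYS_MAP_NODE_NIL is
 * ((uint32_t)~0) >> 6 == 0x03ffffff, the all-ones value of the 26-bit 'ptr'
 * field, used as the "no node" sentinel.
 */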

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

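/* Illustration (added commentary, not from the original source): with a
 * sparsely populated map, compaction collapses chains of single-child
 * nodes.  If an entry with skip == 1 points at a node whose only valid
 * entry also has skip == 1, the outer entry is rewritten to point directly
 * at the grandchild node with skip == 2, so the lookup below consumes two
 * levels' worth of index bits with a single node access.
 */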
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment. */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
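
/* Worked example (illustrative only, not from the original source): if
 * vaddr were 32 bits wide, a watchpoint at wp->vaddr == 0xfffff000 with
 * wp->len == 0x1000 would give wpend == 0xffffffff, whereas addr + len
 * would wrap to 0; an access at addr == 0xffffff00, len == 0x100 has
 * addrend == 0xffffffff, and since neither addr > wpend nor
 * wp->vaddr > addrend holds, the overlap is still detected.
 */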

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
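
/* Commentary (added, not part of the original file): the special section
 * indexes defined above (PHYS_SECTION_NOTDIRTY, PHYS_SECTION_ROM,
 * PHYS_SECTION_WATCH) can be ORed or added into the returned value because
 * phys_section_add() below asserts that section numbers stay below
 * TARGET_PAGE_SIZE, so they never collide with the page-aligned part of a
 * RAM address.
 */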
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
Igor Mammedova2b257d2014-10-31 16:38:37 +00001026void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +02001027{
1028 phys_mem_alloc = alloc;
1029}
1030
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001031static uint16_t phys_section_add(PhysPageMap *map,
1032 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001033{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001034 /* The physical section number is ORed with a page-aligned
1035 * pointer to produce the iotlb entries. Thus it should
1036 * never overflow into the page-aligned value.
1037 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001038 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001039
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001040 if (map->sections_nb == map->sections_nb_alloc) {
1041 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1042 map->sections = g_renew(MemoryRegionSection, map->sections,
1043 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001044 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001045 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001046 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001047 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001048}
1049
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001050static void phys_section_destroy(MemoryRegion *mr)
1051{
Don Slutz55b4e802015-11-30 17:11:04 -05001052 bool have_sub_page = mr->subpage;
1053
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001054 memory_region_unref(mr);
1055
Don Slutz55b4e802015-11-30 17:11:04 -05001056 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001057 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001058 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001059 g_free(subpage);
1060 }
1061}
1062
Paolo Bonzini60926662013-05-29 12:30:26 +02001063static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001064{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001065 while (map->sections_nb > 0) {
1066 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001067 phys_section_destroy(section->mr);
1068 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001069 g_free(map->sections);
1070 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001071}
1072
Avi Kivityac1970f2012-10-03 16:22:53 +02001073static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001074{
1075 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001076 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001077 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001078 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001079 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001080 MemoryRegionSection subsection = {
1081 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001082 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001083 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001084 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001085
Avi Kivityf3705d52012-03-08 16:16:34 +02001086 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001087
Avi Kivityf3705d52012-03-08 16:16:34 +02001088 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001089 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001090 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001091 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001092 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001093 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001094 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001095 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001096 }
1097 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001098 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001099 subpage_register(subpage, start, end,
1100 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001101}
1102
1103
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001104static void register_multipage(AddressSpaceDispatch *d,
1105 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001106{
Avi Kivitya8170e52012-10-23 12:30:10 +02001107 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001108 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001109 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1110 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001111
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001112 assert(num_pages);
1113 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001114}
1115
Avi Kivityac1970f2012-10-03 16:22:53 +02001116static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001117{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001118 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001119 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001120 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001121 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001122
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001123 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1124 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1125 - now.offset_within_address_space;
1126
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001127 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001128 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001129 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001130 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001131 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001132 while (int128_ne(remain.size, now.size)) {
1133 remain.size = int128_sub(remain.size, now.size);
1134 remain.offset_within_address_space += int128_get64(now.size);
1135 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001136 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001137 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001138 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001139 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001140 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001141 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001142 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001144 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001145 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001146 }
1147}
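/*
 * Editor's note (illustrative sketch, not part of the original source):
 * mem_add() above splits a MemoryRegionSection that is not page-aligned
 * into subpage head/tail pieces plus whole-page middle pieces.  Assuming
 * a 4 KiB TARGET_PAGE_SIZE, a section at offset_within_address_space
 * 0x1800 with size 0x3000 would be registered as:
 *
 *     [0x1800, 0x2000)  register_subpage()    unaligned head
 *     [0x2000, 0x4000)  register_multipage()  whole pages
 *     [0x4000, 0x4800)  register_subpage()    unaligned tail
 */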
1148
Sheng Yang62a27442010-01-26 19:21:16 +08001149void qemu_flush_coalesced_mmio_buffer(void)
1150{
1151 if (kvm_enabled())
1152 kvm_flush_coalesced_mmio_buffer();
1153}
1154
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001155void qemu_mutex_lock_ramlist(void)
1156{
1157 qemu_mutex_lock(&ram_list.mutex);
1158}
1159
1160void qemu_mutex_unlock_ramlist(void)
1161{
1162 qemu_mutex_unlock(&ram_list.mutex);
1163}
1164
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001165#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001166
1167#include <sys/vfs.h>
1168
1169#define HUGETLBFS_MAGIC 0x958458f6
1170
Hu Taofc7a5802014-09-09 13:28:01 +08001171static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001172{
1173 struct statfs fs;
1174 int ret;
1175
1176 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001177 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001178 } while (ret != 0 && errno == EINTR);
1179
1180 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001181 error_setg_errno(errp, errno, "failed to get page size of file %s",
1182 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001183 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001184 }
1185
Marcelo Tosattic9027602010-03-01 20:25:08 -03001186 return fs.f_bsize;
1187}
1188
Alex Williamson04b16652010-07-02 11:13:17 -06001189static void *file_ram_alloc(RAMBlock *block,
1190 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001191 const char *path,
1192 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001193{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001194 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001195 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001196 char *sanitized_name;
1197 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001198 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001200 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001201 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001202
Hu Taofc7a5802014-09-09 13:28:01 +08001203 hpagesize = gethugepagesize(path, &local_err);
1204 if (local_err) {
1205 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001206 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001207 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001208 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001209
1210 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001211 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1212 "or larger than huge page size 0x%" PRIx64,
1213 memory, hpagesize);
1214 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001215 }
1216
1217 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001218 error_setg(errp,
1219 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001220 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001221 }
1222
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001223 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1224 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1225 sanitized_name = g_strdup(memory_region_name(block->mr));
1226 for (c = sanitized_name; *c != '\0'; c++) {
1227 if (*c == '/') {
1228 *c = '_';
1229 }
1230 }
1231
1232 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1233 sanitized_name);
1234 g_free(sanitized_name);
1235
1236 fd = mkstemp(filename);
1237 if (fd >= 0) {
1238 unlink(filename);
1239 }
1240 g_free(filename);
1241 } else {
1242 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001243 }
1244
Marcelo Tosattic9027602010-03-01 20:25:08 -03001245 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001246 error_setg_errno(errp, errno,
1247 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001248 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001249 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001250
Chen Hanxiao9284f312015-07-24 11:12:03 +08001251 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001252
1253 /*
1254 * ftruncate is not supported by hugetlbfs in older
1255 * hosts, so don't bother bailing out on errors.
1256 * If anything goes wrong with it under other filesystems,
1257 * mmap will fail.
1258 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001259 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001260 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001261 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001262
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001263 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001264 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001265 error_setg_errno(errp, errno,
1266 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001267 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001268 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001269 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001270
1271 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001272 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001273 }
1274
Alex Williamson04b16652010-07-02 11:13:17 -06001275 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001276 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001277
1278error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001279 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001280}
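/*
 * Editor's note (hedged summary, not part of the original source): the
 * hugetlbfs path above is what typically backs "-mem-path /dev/hugepages".
 * When the path is a directory, an unlinked temporary file is created in
 * it; otherwise the path itself is opened as the backing file.  The file
 * is grown with ftruncate() and mapped with qemu_ram_mmap(), so guest RAM
 * ends up in the huge-page file instead of anonymous memory.
 */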
1281#endif
1282
Mike Day0dc3f442013-09-05 14:41:35 -04001283/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001284static ram_addr_t find_ram_offset(ram_addr_t size)
1285{
Alex Williamson04b16652010-07-02 11:13:17 -06001286 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001287 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001288
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001289 assert(size != 0); /* it would hand out the same offset multiple times */
1290
Mike Day0dc3f442013-09-05 14:41:35 -04001291 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001292 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001293 }
Alex Williamson04b16652010-07-02 11:13:17 -06001294
Mike Day0dc3f442013-09-05 14:41:35 -04001295 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001296 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001297
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001298 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001299
Mike Day0dc3f442013-09-05 14:41:35 -04001300 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001301 if (next_block->offset >= end) {
1302 next = MIN(next, next_block->offset);
1303 }
1304 }
1305 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001306 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001307 mingap = next - end;
1308 }
1309 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001310
1311 if (offset == RAM_ADDR_MAX) {
1312 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1313 (uint64_t)size);
1314 abort();
1315 }
1316
Alex Williamson04b16652010-07-02 11:13:17 -06001317 return offset;
1318}
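/*
 * Editor's note (illustrative example, not part of the original source):
 * the loop above picks the smallest existing gap that still fits the
 * request rather than simply appending.  With blocks occupying
 * [0x00000000, 0x08000000) and [0x10000000, 0x11000000), a 16 MiB request
 * (size 0x01000000) would be placed at offset 0x08000000, in the tighter
 * 128 MiB gap between the two blocks, not after the last block.
 */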
1319
Juan Quintela652d7ec2012-07-20 10:37:54 +02001320ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001321{
Alex Williamsond17b5282010-06-25 11:08:38 -06001322 RAMBlock *block;
1323 ram_addr_t last = 0;
1324
Mike Day0dc3f442013-09-05 14:41:35 -04001325 rcu_read_lock();
1326 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001327 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001328 }
Mike Day0dc3f442013-09-05 14:41:35 -04001329 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001330 return last;
1331}
1332
Jason Baronddb97f12012-08-02 15:44:16 -04001333static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1334{
1335 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001336
1337 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001338 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001339 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1340 if (ret) {
1341 perror("qemu_madvise");
1342 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1343 "but dump_guest_core=off specified\n");
1344 }
1345 }
1346}
1347
Mike Day0dc3f442013-09-05 14:41:35 -04001348/* Called within an RCU critical section, or while the ramlist lock
1349 * is held.
1350 */
Hu Tao20cfe882014-04-02 15:13:26 +08001351static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001352{
Hu Tao20cfe882014-04-02 15:13:26 +08001353 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001354
Mike Day0dc3f442013-09-05 14:41:35 -04001355 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001356 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001357 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001358 }
1359 }
Hu Tao20cfe882014-04-02 15:13:26 +08001360
1361 return NULL;
1362}
1363
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001364const char *qemu_ram_get_idstr(RAMBlock *rb)
1365{
1366 return rb->idstr;
1367}
1368
Mike Dayae3a7042013-09-05 14:41:35 -04001369/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001370void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1371{
Mike Dayae3a7042013-09-05 14:41:35 -04001372 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001373
Mike Day0dc3f442013-09-05 14:41:35 -04001374 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001375 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001376 assert(new_block);
1377 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001378
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001379 if (dev) {
1380 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001381 if (id) {
1382 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001383 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001384 }
1385 }
1386 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1387
Mike Day0dc3f442013-09-05 14:41:35 -04001388 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001389 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001390 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1391 new_block->idstr);
1392 abort();
1393 }
1394 }
Mike Day0dc3f442013-09-05 14:41:35 -04001395 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001396}
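/*
 * Editor's note (illustrative usage, not part of the original source):
 * this is roughly what the vmstate_register_ram() helper (outside this
 * file) does when a migratable RAM region is registered for a device:
 *
 *     qemu_ram_set_idstr(memory_region_get_ram_addr(mr),
 *                        memory_region_name(mr), DEVICE(dev));
 *
 * With a non-NULL device the idstr becomes "<qdev path>/<region name>";
 * duplicate idstrs abort, so names must be unique per device path.
 */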
1397
Mike Dayae3a7042013-09-05 14:41:35 -04001398/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001399void qemu_ram_unset_idstr(ram_addr_t addr)
1400{
Mike Dayae3a7042013-09-05 14:41:35 -04001401 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001402
Mike Dayae3a7042013-09-05 14:41:35 -04001403 /* FIXME: arch_init.c assumes that this is not called throughout
1404 * migration. Ignore the problem since hot-unplug during migration
1405 * does not work anyway.
1406 */
1407
Mike Day0dc3f442013-09-05 14:41:35 -04001408 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001409 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001410 if (block) {
1411 memset(block->idstr, 0, sizeof(block->idstr));
1412 }
Mike Day0dc3f442013-09-05 14:41:35 -04001413 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001414}
1415
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001416static int memory_try_enable_merging(void *addr, size_t len)
1417{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001418 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001419 /* disabled by the user */
1420 return 0;
1421 }
1422
1423 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1424}
1425
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001426/* Only legal before the guest might have detected the memory size: e.g. on
1427 * incoming migration, or right after reset.
1428 *
1429 * As the memory core doesn't know how memory is accessed, it is up to
1430 * the resize callback to update device state and/or add assertions to detect
1431 * misuse, if necessary.
1432 */
1433int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1434{
1435 RAMBlock *block = find_ram_block(base);
1436
1437 assert(block);
1438
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001439 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001440
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001441 if (block->used_length == newsize) {
1442 return 0;
1443 }
1444
1445 if (!(block->flags & RAM_RESIZEABLE)) {
1446 error_setg_errno(errp, EINVAL,
1447 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1448 " in != 0x" RAM_ADDR_FMT, block->idstr,
1449 newsize, block->used_length);
1450 return -EINVAL;
1451 }
1452
1453 if (block->max_length < newsize) {
1454 error_setg_errno(errp, EINVAL,
1455 "Length too large: %s: 0x" RAM_ADDR_FMT
1456 " > 0x" RAM_ADDR_FMT, block->idstr,
1457 newsize, block->max_length);
1458 return -EINVAL;
1459 }
1460
1461 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1462 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001463 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1464 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001465 memory_region_set_size(block->mr, newsize);
1466 if (block->resized) {
1467 block->resized(block->idstr, newsize, block->host);
1468 }
1469 return 0;
1470}
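/*
 * Editor's note (illustrative usage, not part of the original source):
 * only blocks created with RAM_RESIZEABLE (see qemu_ram_alloc_resizeable()
 * below) may change size, and only up to their max_length.  A hypothetical
 * caller on the incoming migration side might do:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_offset, new_used_length, &err) < 0) {
 *         ... report err and fail the incoming stream ...
 *     }
 *
 * where block_offset is the ram_addr_t that was returned at allocation.
 */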
1471
Hu Taoef701d72014-09-09 13:27:54 +08001472static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001473{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001474 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001475 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001476 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001477 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001478
1479 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001480
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001481 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001482 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001483
1484 if (!new_block->host) {
1485 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001486 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001487 new_block->mr, &err);
1488 if (err) {
1489 error_propagate(errp, err);
1490 qemu_mutex_unlock_ramlist();
1491 return -1;
1492 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001493 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001494 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001495 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001496 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001497 error_setg_errno(errp, errno,
1498 "cannot set up guest memory '%s'",
1499 memory_region_name(new_block->mr));
1500 qemu_mutex_unlock_ramlist();
1501 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001502 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001503 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001504 }
1505 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001506
Li Zhijiandd631692015-07-02 20:18:06 +08001507 new_ram_size = MAX(old_ram_size,
1508 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1509 if (new_ram_size > old_ram_size) {
1510 migration_bitmap_extend(old_ram_size, new_ram_size);
1511 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001512 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1513 * QLIST (which has an RCU-friendly variant) does not have insertion at
1514 * tail, so save the last element in last_block.
1515 */
Mike Day0dc3f442013-09-05 14:41:35 -04001516 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001517 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001518 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001519 break;
1520 }
1521 }
1522 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001523 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001524 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001525 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001526 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001527 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001528 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001529 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001530
Mike Day0dc3f442013-09-05 14:41:35 -04001531 /* Write list before version */
1532 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001533 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001534 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001535
Juan Quintela2152f5c2013-10-08 13:52:02 +02001536 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1537
1538 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001539 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001540
1541 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001542 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1543 ram_list.dirty_memory[i] =
1544 bitmap_zero_extend(ram_list.dirty_memory[i],
1545 old_ram_size, new_ram_size);
1546 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001547 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001548 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001549 new_block->used_length,
1550 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001551
Paolo Bonzinia904c912015-01-21 16:18:35 +01001552 if (new_block->host) {
1553 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1554 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1555 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1556 if (kvm_enabled()) {
1557 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1558 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001559 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001560
1561 return new_block->offset;
1562}
1563
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001564#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001565ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001566 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001567 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001568{
1569 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001570 ram_addr_t addr;
1571 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001572
1573 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001574 error_setg(errp, "-mem-path not supported with Xen");
1575 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001576 }
1577
1578 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1579 /*
1580 * file_ram_alloc() needs to allocate just like
1581 * phys_mem_alloc, but we haven't bothered to provide
1582 * a hook there.
1583 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001584 error_setg(errp,
1585 "-mem-path not supported with this accelerator");
1586 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001587 }
1588
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001589 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001590 new_block = g_malloc0(sizeof(*new_block));
1591 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001592 new_block->used_length = size;
1593 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001594 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001595 new_block->host = file_ram_alloc(new_block, size,
1596 mem_path, errp);
1597 if (!new_block->host) {
1598 g_free(new_block);
1599 return -1;
1600 }
1601
Hu Taoef701d72014-09-09 13:27:54 +08001602 addr = ram_block_add(new_block, &local_err);
1603 if (local_err) {
1604 g_free(new_block);
1605 error_propagate(errp, local_err);
1606 return -1;
1607 }
1608 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001609}
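/*
 * Editor's note (illustrative usage, not part of the original source):
 * this is the entry point for file-backed guest RAM, e.g. from a
 * memory-backend-file object.  A hypothetical caller would look like:
 *
 *     Error *err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc_from_file(size, mr, true,
 *                                                "/dev/hugepages", &err);
 *     if (err) {
 *         error_propagate(errp, err);
 *     }
 *
 * Passing true for "share" sets RAM_SHARED, i.e. a MAP_SHARED mapping,
 * which is what external users of the memory (e.g. vhost-user) need.
 */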
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001610#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001611
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001612static
1613ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1614 void (*resized)(const char*,
1615 uint64_t length,
1616 void *host),
1617 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001618 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001619{
1620 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001621 ram_addr_t addr;
1622 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001623
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001624 size = HOST_PAGE_ALIGN(size);
1625 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001626 new_block = g_malloc0(sizeof(*new_block));
1627 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001628 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001629 new_block->used_length = size;
1630 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001631 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001632 new_block->fd = -1;
1633 new_block->host = host;
1634 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001635 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001637 if (resizeable) {
1638 new_block->flags |= RAM_RESIZEABLE;
1639 }
Hu Taoef701d72014-09-09 13:27:54 +08001640 addr = ram_block_add(new_block, &local_err);
1641 if (local_err) {
1642 g_free(new_block);
1643 error_propagate(errp, local_err);
1644 return -1;
1645 }
1646 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001647}
1648
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001649ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1650 MemoryRegion *mr, Error **errp)
1651{
1652 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1653}
1654
Hu Taoef701d72014-09-09 13:27:54 +08001655ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001656{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001657 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1658}
1659
1660ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1661 void (*resized)(const char*,
1662 uint64_t length,
1663 void *host),
1664 MemoryRegion *mr, Error **errp)
1665{
1666 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001667}
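/*
 * Editor's note (illustrative usage, not part of the original source):
 * a resizeable block reserves max_length of address space up front but
 * marks only "size" as used; the callback runs from qemu_ram_resize()
 * after a successful resize.  A hypothetical setup (names made up):
 *
 *     static void fw_blob_resized(const char *id, uint64_t len, void *host)
 *     {
 *         ... update the device model's idea of the blob length ...
 *     }
 *
 *     qemu_ram_alloc_resizeable(64 * 1024, 16 * 1024 * 1024,
 *                               fw_blob_resized, mr, &err);
 */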
bellarde9a1ab12007-02-08 23:08:38 +00001668
Paolo Bonzini43771532013-09-09 17:58:40 +02001669static void reclaim_ramblock(RAMBlock *block)
1670{
1671 if (block->flags & RAM_PREALLOC) {
1672 ;
1673 } else if (xen_enabled()) {
1674 xen_invalidate_map_cache_entry(block->host);
1675#ifndef _WIN32
1676 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001677 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001678 close(block->fd);
1679#endif
1680 } else {
1681 qemu_anon_ram_free(block->host, block->max_length);
1682 }
1683 g_free(block);
1684}
1685
Anthony Liguoric227f092009-10-01 16:12:16 -05001686void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001687{
Alex Williamson04b16652010-07-02 11:13:17 -06001688 RAMBlock *block;
1689
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001690 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001691 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001692 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001693 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001694 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001695 /* Write list before version */
1696 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001697 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001698 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001699 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001700 }
1701 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001702 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001703}
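/*
 * Editor's note (hedged summary, not part of the original source): the
 * block is unlinked under the ramlist mutex, but its memory is released
 * by reclaim_ramblock() only after an RCU grace period (call_rcu above),
 * so a reader that obtained a host pointer via qemu_get_ram_ptr() inside
 * an RCU critical section cannot have the backing store unmapped
 * underneath it.
 */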
1704
Huang Yingcd19cfa2011-03-02 08:56:19 +01001705#ifndef _WIN32
1706void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1707{
1708 RAMBlock *block;
1709 ram_addr_t offset;
1710 int flags;
1711 void *area, *vaddr;
1712
Mike Day0dc3f442013-09-05 14:41:35 -04001713 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001714 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001715 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001716 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001717 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001718 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001719 } else if (xen_enabled()) {
1720 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001721 } else {
1722 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001723 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001724 flags |= (block->flags & RAM_SHARED ?
1725 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001726 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1727 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001728 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001729 /*
1730 * Remap needs to match alloc. Accelerators that
1731 * set phys_mem_alloc never remap. If they did,
1732 * we'd need a remap hook here.
1733 */
1734 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1735
Huang Yingcd19cfa2011-03-02 08:56:19 +01001736 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1737 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1738 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001739 }
1740 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001741 fprintf(stderr, "Could not remap addr: "
1742 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001743 length, addr);
1744 exit(1);
1745 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001746 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001747 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001748 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001749 }
1750 }
1751}
1752#endif /* !_WIN32 */
1753
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001754int qemu_get_ram_fd(ram_addr_t addr)
1755{
Mike Dayae3a7042013-09-05 14:41:35 -04001756 RAMBlock *block;
1757 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001758
Mike Day0dc3f442013-09-05 14:41:35 -04001759 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001760 block = qemu_get_ram_block(addr);
1761 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001762 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001763 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001764}
1765
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001766void qemu_set_ram_fd(ram_addr_t addr, int fd)
1767{
1768 RAMBlock *block;
1769
1770 rcu_read_lock();
1771 block = qemu_get_ram_block(addr);
1772 block->fd = fd;
1773 rcu_read_unlock();
1774}
1775
Damjan Marion3fd74b82014-06-26 23:01:32 +02001776void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1777{
Mike Dayae3a7042013-09-05 14:41:35 -04001778 RAMBlock *block;
1779 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001780
Mike Day0dc3f442013-09-05 14:41:35 -04001781 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001782 block = qemu_get_ram_block(addr);
1783 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001784 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001785 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001786}
1787
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001788/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001789 * This should not be used for general purpose DMA. Use address_space_map
1790 * or address_space_rw instead. For local memory (e.g. video ram) that the
1791 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001792 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001793 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001794 */
1795void *qemu_get_ram_ptr(ram_addr_t addr)
1796{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001797 RAMBlock *block = qemu_get_ram_block(addr);
Mike Dayae3a7042013-09-05 14:41:35 -04001798
1799 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001800 /* We need to check if the requested address is in the RAM
1801 * because we don't want to map the entire memory in QEMU.
1802 * In that case just map until the end of the page.
1803 */
1804 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001805 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001806 }
Mike Dayae3a7042013-09-05 14:41:35 -04001807
1808 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001809 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001810 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001811}
1812
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001813/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001814 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001815 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001816 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001817 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001818static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001819{
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001820 RAMBlock *block;
1821 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001822 if (*size == 0) {
1823 return NULL;
1824 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001825
1826 block = qemu_get_ram_block(addr);
1827 offset_inside_block = addr - block->offset;
1828 *size = MIN(*size, block->max_length - offset_inside_block);
1829
1830 if (xen_enabled() && block->host == NULL) {
1831 /* We need to check if the requested address is in the RAM
1832 * because we don't want to map the entire memory in QEMU.
1833 * In that case just map the requested area.
1834 */
1835 if (block->offset == 0) {
1836 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001837 }
1838
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001839 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001840 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001841
1842 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001843}
1844
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001845/*
1846 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1847 * in that RAMBlock.
1848 *
1849 * ptr: Host pointer to look up
1850 * round_offset: If true round the result offset down to a page boundary
1851 * *ram_addr: set to result ram_addr
1852 * *offset: set to result offset within the RAMBlock
1853 *
1854 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001855 *
1856 * By the time this function returns, the returned pointer is not protected
1857 * by RCU anymore. If the caller is not within an RCU critical section and
1858 * does not hold the iothread lock, it must have other means of protecting the
1859 * pointer, such as a reference to the region that includes the incoming
1860 * ram_addr_t.
1861 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001862RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1863 ram_addr_t *ram_addr,
1864 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001865{
pbrook94a6b542009-04-11 17:15:54 +00001866 RAMBlock *block;
1867 uint8_t *host = ptr;
1868
Jan Kiszka868bb332011-06-21 22:59:09 +02001869 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001870 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001871 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001872 block = qemu_get_ram_block(*ram_addr);
1873 if (block) {
1874 *offset = (host - block->host);
1875 }
Mike Day0dc3f442013-09-05 14:41:35 -04001876 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001877 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001878 }
1879
Mike Day0dc3f442013-09-05 14:41:35 -04001880 rcu_read_lock();
1881 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001882 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001883 goto found;
1884 }
1885
Mike Day0dc3f442013-09-05 14:41:35 -04001886 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001887 /* This case happens when the block is not mapped. */
1888 if (block->host == NULL) {
1889 continue;
1890 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001891 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001892 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001893 }
pbrook94a6b542009-04-11 17:15:54 +00001894 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001895
Mike Day0dc3f442013-09-05 14:41:35 -04001896 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001897 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001898
1899found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001900 *offset = (host - block->host);
1901 if (round_offset) {
1902 *offset &= TARGET_PAGE_MASK;
1903 }
1904 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001905 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001906 return block;
1907}
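/*
 * Editor's note (illustrative usage, not part of the original source):
 * callers such as migration code use this to turn a host pointer back
 * into a (block, offset) pair, along the lines of:
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true,
 *                                             &ram_addr, &offset);
 *     if (rb) {
 *         ... offset is page-aligned here because round_offset was true ...
 *     }
 */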
1908
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001909/*
1910 * Finds the named RAMBlock
1911 *
1912 * name: The name of RAMBlock to find
1913 *
1914 * Returns: RAMBlock (or NULL if not found)
1915 */
1916RAMBlock *qemu_ram_block_by_name(const char *name)
1917{
1918 RAMBlock *block;
1919
1920 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1921 if (!strcmp(name, block->idstr)) {
1922 return block;
1923 }
1924 }
1925
1926 return NULL;
1927}
1928
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001929/* Some of the softmmu routines need to translate from a host pointer
1930 (typically a TLB entry) back to a ram offset. */
1931MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1932{
1933 RAMBlock *block;
1934 ram_addr_t offset; /* Not used */
1935
1936 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1937
1938 if (!block) {
1939 return NULL;
1940 }
1941
1942 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001943}
Alex Williamsonf471a172010-06-11 11:11:42 -06001944
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001945/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001946static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001947 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001948{
Juan Quintela52159192013-10-08 12:44:04 +02001949 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001950 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001951 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001952 switch (size) {
1953 case 1:
1954 stb_p(qemu_get_ram_ptr(ram_addr), val);
1955 break;
1956 case 2:
1957 stw_p(qemu_get_ram_ptr(ram_addr), val);
1958 break;
1959 case 4:
1960 stl_p(qemu_get_ram_ptr(ram_addr), val);
1961 break;
1962 default:
1963 abort();
1964 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001965 /* Set both VGA and migration bits for simplicity and to remove
1966 * the notdirty callback faster.
1967 */
1968 cpu_physical_memory_set_dirty_range(ram_addr, size,
1969 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001970 /* we remove the notdirty callback only if the code has been
1971 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001972 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001973 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001974 }
bellard1ccde1c2004-02-06 19:46:14 +00001975}
1976
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001977static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1978 unsigned size, bool is_write)
1979{
1980 return is_write;
1981}
1982
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001983static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001984 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001985 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001986 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001987};
1988
pbrook0f459d12008-06-09 00:20:13 +00001989/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001990static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001991{
Andreas Färber93afead2013-08-26 03:41:01 +02001992 CPUState *cpu = current_cpu;
1993 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001994 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001995 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001996 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001997 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001998
Andreas Färberff4700b2013-08-26 18:23:18 +02001999 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002000 /* We re-entered the check after replacing the TB. Now raise
2001 * the debug interrupt so that it will trigger after the
2002 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002003 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002004 return;
2005 }
Andreas Färber93afead2013-08-26 03:41:01 +02002006 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002007 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002008 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2009 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002010 if (flags == BP_MEM_READ) {
2011 wp->flags |= BP_WATCHPOINT_HIT_READ;
2012 } else {
2013 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2014 }
2015 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002016 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002017 if (!cpu->watchpoint_hit) {
2018 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002019 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002020 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002021 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002022 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002023 } else {
2024 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002025 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002026 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002027 }
aliguori06d55cc2008-11-18 20:24:06 +00002028 }
aliguori6e140f22008-11-18 20:37:55 +00002029 } else {
2030 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002031 }
2032 }
2033}
2034
pbrook6658ffb2007-03-16 23:58:11 +00002035/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2036 so these check for a hit then pass through to the normal out-of-line
2037 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002038static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2039 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002040{
Peter Maydell66b9b432015-04-26 16:49:24 +01002041 MemTxResult res;
2042 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002043
Peter Maydell66b9b432015-04-26 16:49:24 +01002044 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002045 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002046 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002047 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002048 break;
2049 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002050 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002051 break;
2052 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002053 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002054 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002055 default: abort();
2056 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002057 *pdata = data;
2058 return res;
2059}
2060
2061static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2062 uint64_t val, unsigned size,
2063 MemTxAttrs attrs)
2064{
2065 MemTxResult res;
2066
2067 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2068 switch (size) {
2069 case 1:
2070 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2071 break;
2072 case 2:
2073 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2074 break;
2075 case 4:
2076 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2077 break;
2078 default: abort();
2079 }
2080 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002081}
2082
Avi Kivity1ec9b902012-01-02 12:47:48 +02002083static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002084 .read_with_attrs = watch_mem_read,
2085 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002086 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002087};
pbrook6658ffb2007-03-16 23:58:11 +00002088
Peter Maydellf25a49e2015-04-26 16:49:24 +01002089static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2090 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002091{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002092 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002093 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002094 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002095
blueswir1db7b5422007-05-26 17:36:03 +00002096#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002097 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002098 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002099#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002100 res = address_space_read(subpage->as, addr + subpage->base,
2101 attrs, buf, len);
2102 if (res) {
2103 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002104 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002105 switch (len) {
2106 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002107 *data = ldub_p(buf);
2108 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002109 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002110 *data = lduw_p(buf);
2111 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002112 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002113 *data = ldl_p(buf);
2114 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002115 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002116 *data = ldq_p(buf);
2117 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002118 default:
2119 abort();
2120 }
blueswir1db7b5422007-05-26 17:36:03 +00002121}
2122
Peter Maydellf25a49e2015-04-26 16:49:24 +01002123static MemTxResult subpage_write(void *opaque, hwaddr addr,
2124 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002125{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002126 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002127 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002128
blueswir1db7b5422007-05-26 17:36:03 +00002129#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002130 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002131 " value %"PRIx64"\n",
2132 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002133#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002134 switch (len) {
2135 case 1:
2136 stb_p(buf, value);
2137 break;
2138 case 2:
2139 stw_p(buf, value);
2140 break;
2141 case 4:
2142 stl_p(buf, value);
2143 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002144 case 8:
2145 stq_p(buf, value);
2146 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002147 default:
2148 abort();
2149 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002150 return address_space_write(subpage->as, addr + subpage->base,
2151 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002152}
2153
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002154static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002155 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002156{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002157 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002158#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002159 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002160 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002161#endif
2162
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002163 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002164 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002165}
2166
Avi Kivity70c68e42012-01-02 12:32:48 +02002167static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002168 .read_with_attrs = subpage_read,
2169 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002170 .impl.min_access_size = 1,
2171 .impl.max_access_size = 8,
2172 .valid.min_access_size = 1,
2173 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002174 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002175 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002176};
2177
Anthony Liguoric227f092009-10-01 16:12:16 -05002178static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002179 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002180{
2181 int idx, eidx;
2182
2183 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2184 return -1;
2185 idx = SUBPAGE_IDX(start);
2186 eidx = SUBPAGE_IDX(end);
2187#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002188 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2189 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002190#endif
blueswir1db7b5422007-05-26 17:36:03 +00002191 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002192 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002193 }
2194
2195 return 0;
2196}
2197
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002198static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002199{
Anthony Liguoric227f092009-10-01 16:12:16 -05002200 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002201
Anthony Liguori7267c092011-08-20 22:09:37 -05002202 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002203
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002204 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002205 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002206 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002207 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002208 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002209#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002210 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2211 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002212#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002213 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002214
2215 return mmio;
2216}
2217
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002218static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2219 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002220{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002221 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002222 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002223 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002224 .mr = mr,
2225 .offset_within_address_space = 0,
2226 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002227 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002228 };
2229
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002230 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002231}
2232
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002233MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002234{
Peter Maydell32857f42015-10-01 15:29:50 +01002235 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2236 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002237 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002238
2239 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002240}
2241
Avi Kivitye9179ce2009-06-14 11:38:52 +03002242static void io_mem_init(void)
2243{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002244 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002245 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002246 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002247 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002248 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002249 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002250 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002251}
2252
Avi Kivityac1970f2012-10-03 16:22:53 +02002253static void mem_begin(MemoryListener *listener)
2254{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002255 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002256 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2257 uint16_t n;
2258
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002259 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002260 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002261 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002262 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002263 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002264 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002265 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002266 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002267
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002268 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002269 d->as = as;
2270 as->next_dispatch = d;
2271}
2272
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002273static void address_space_dispatch_free(AddressSpaceDispatch *d)
2274{
2275 phys_sections_free(&d->map);
2276 g_free(d);
2277}
2278
Paolo Bonzini00752702013-05-29 12:13:54 +02002279static void mem_commit(MemoryListener *listener)
2280{
2281 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002282 AddressSpaceDispatch *cur = as->dispatch;
2283 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002284
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002285 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002286
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002287 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002288 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002289 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002290 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002291}
2292
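/*
 * Commit hook for TCG: refresh the per-CPU dispatch pointer and flush the
 * TLB, because section indices cached in TLB entries may no longer match
 * the new dispatch map.
 */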
Avi Kivity1d711482012-10-02 18:54:45 +02002293static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002294{
Peter Maydell32857f42015-10-01 15:29:50 +01002295 CPUAddressSpace *cpuas;
2296 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002297
2298 /* since each CPU stores ram addresses in its TLB cache, we must
2299 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002300 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2301 cpu_reloading_memory_map();
2302 /* The CPU and TLB are protected by the iothread lock.
2303 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2304 * may have split the RCU critical section.
2305 */
2306 d = atomic_rcu_read(&cpuas->as->dispatch);
2307 cpuas->memory_dispatch = d;
2308 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002309}
2310
Avi Kivityac1970f2012-10-03 16:22:53 +02002311void address_space_init_dispatch(AddressSpace *as)
2312{
Paolo Bonzini00752702013-05-29 12:13:54 +02002313 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002314 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002315 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002316 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002317 .region_add = mem_add,
2318 .region_nop = mem_add,
2319 .priority = 0,
2320 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002321 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002322}
2323
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002324void address_space_unregister(AddressSpace *as)
2325{
2326 memory_listener_unregister(&as->dispatch_listener);
2327}
2328
Avi Kivity83f3c252012-10-07 12:59:55 +02002329void address_space_destroy_dispatch(AddressSpace *as)
2330{
2331 AddressSpaceDispatch *d = as->dispatch;
2332
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002333 atomic_rcu_set(&as->dispatch, NULL);
2334 if (d) {
2335 call_rcu(d, address_space_dispatch_free, rcu);
2336 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002337}
2338
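/*
 * Create the global "system" and "io" memory regions and wrap them in
 * address_space_memory and address_space_io respectively.
 */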
Avi Kivity62152b82011-07-26 14:26:14 +03002339static void memory_map_init(void)
2340{
Anthony Liguori7267c092011-08-20 22:09:37 -05002341 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002342
Paolo Bonzini57271d62013-11-07 17:14:37 +01002343 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002344 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002345
Anthony Liguori7267c092011-08-20 22:09:37 -05002346 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002347 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2348 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002349 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002350}
2351
2352MemoryRegion *get_system_memory(void)
2353{
2354 return system_memory;
2355}
2356
Avi Kivity309cb472011-08-08 16:09:03 +03002357MemoryRegion *get_system_io(void)
2358{
2359 return system_io;
2360}
2361
pbrooke2eef172008-06-08 01:09:01 +00002362#endif /* !defined(CONFIG_USER_ONLY) */
2363
bellard13eb76e2004-01-24 15:23:36 +00002364/* physical memory access (slow version, mainly for debug) */
2365#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002366int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002367 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002368{
2369 int l, flags;
2370 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002371 void *p;
bellard13eb76e2004-01-24 15:23:36 +00002372
2373 while (len > 0) {
2374 page = addr & TARGET_PAGE_MASK;
2375 l = (page + TARGET_PAGE_SIZE) - addr;
2376 if (l > len)
2377 l = len;
2378 flags = page_get_flags(page);
2379 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002380 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002381 if (is_write) {
2382 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002383 return -1;
bellard579a97f2007-11-11 14:26:47 +00002384 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002385 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002386 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002387 memcpy(p, buf, l);
2388 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002389 } else {
2390 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002391 return -1;
bellard579a97f2007-11-11 14:26:47 +00002392 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002393 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002394 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002395 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002396 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002397 }
2398 len -= l;
2399 buf += l;
2400 addr += l;
2401 }
Paul Brooka68fe892010-03-01 00:08:59 +00002402 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002403}
bellard8df1cd02005-01-28 22:37:22 +00002404
bellard13eb76e2004-01-24 15:23:36 +00002405#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002406
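/*
 * After RAM has been written directly (bypassing MemoryRegion ops), update
 * the dirty bitmaps and, if the range may hold translated code, invalidate
 * the affected translation blocks.
 */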
Paolo Bonzini845b6212015-03-23 11:45:53 +01002407static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002408 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002409{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002410 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2411 /* No early return if dirty_log_mask is or becomes 0, because
2412 * cpu_physical_memory_set_dirty_range will still call
2413 * xen_modified_memory.
2414 */
2415 if (dirty_log_mask) {
2416 dirty_log_mask =
2417 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002418 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002419 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2420 tb_invalidate_phys_range(addr, addr + length);
2421 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2422 }
2423 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002424}
2425
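/*
 * Clamp an access of l bytes at addr to what the region supports: honour
 * valid.max_access_size (default 4), restrict regions without unaligned
 * support to the natural alignment of addr, and round down to a power of
 * two.
 */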
Richard Henderson23326162013-07-08 14:55:59 -07002426static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002427{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002428 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002429
2430 /* Regions are assumed to support 1-4 byte accesses unless
2431 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002432 if (access_size_max == 0) {
2433 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002434 }
Richard Henderson23326162013-07-08 14:55:59 -07002435
2436 /* Bound the maximum access by the alignment of the address. */
2437 if (!mr->ops->impl.unaligned) {
2438 unsigned align_size_max = addr & -addr;
2439 if (align_size_max != 0 && align_size_max < access_size_max) {
2440 access_size_max = align_size_max;
2441 }
2442 }
2443
2444 /* Don't attempt accesses larger than the maximum. */
2445 if (l > access_size_max) {
2446 l = access_size_max;
2447 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002448 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002449
2450 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002451}
2452
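/*
 * Take the iothread lock before an MMIO access if the region is not marked
 * safe for lock-free dispatch, and flush any coalesced MMIO first.  Returns
 * true if the caller must drop the lock again once the access is done.
 */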
Jan Kiszka4840f102015-06-18 18:47:22 +02002453static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002454{
Jan Kiszka4840f102015-06-18 18:47:22 +02002455 bool unlocked = !qemu_mutex_iothread_locked();
2456 bool release_lock = false;
2457
2458 if (unlocked && mr->global_locking) {
2459 qemu_mutex_lock_iothread();
2460 unlocked = false;
2461 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002462 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002463 if (mr->flush_coalesced_mmio) {
2464 if (unlocked) {
2465 qemu_mutex_lock_iothread();
2466 }
2467 qemu_flush_coalesced_mmio_buffer();
2468 if (unlocked) {
2469 qemu_mutex_unlock_iothread();
2470 }
2471 }
2472
2473 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002474}
2475
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002476/* Called within RCU critical section. */
2477static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2478 MemTxAttrs attrs,
2479 const uint8_t *buf,
2480 int len, hwaddr addr1,
2481 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002482{
bellard13eb76e2004-01-24 15:23:36 +00002483 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002484 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002485 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002486 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002487
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002488 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002489 if (!memory_access_is_direct(mr, true)) {
2490 release_lock |= prepare_mmio_access(mr);
2491 l = memory_access_size(mr, l, addr1);
2492 /* XXX: could force current_cpu to NULL to avoid
2493 potential bugs */
2494 switch (l) {
2495 case 8:
2496 /* 64 bit write access */
2497 val = ldq_p(buf);
2498 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2499 attrs);
2500 break;
2501 case 4:
2502 /* 32 bit write access */
2503 val = ldl_p(buf);
2504 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2505 attrs);
2506 break;
2507 case 2:
2508 /* 16 bit write access */
2509 val = lduw_p(buf);
2510 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2511 attrs);
2512 break;
2513 case 1:
2514 /* 8 bit write access */
2515 val = ldub_p(buf);
2516 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2517 attrs);
2518 break;
2519 default:
2520 abort();
bellard13eb76e2004-01-24 15:23:36 +00002521 }
2522 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002523 addr1 += memory_region_get_ram_addr(mr);
2524 /* RAM case */
2525 ptr = qemu_get_ram_ptr(addr1);
2526 memcpy(ptr, buf, l);
2527 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002528 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002529
2530 if (release_lock) {
2531 qemu_mutex_unlock_iothread();
2532 release_lock = false;
2533 }
2534
bellard13eb76e2004-01-24 15:23:36 +00002535 len -= l;
2536 buf += l;
2537 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002538
2539 if (!len) {
2540 break;
2541 }
2542
2543 l = len;
2544 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002545 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002546
Peter Maydell3b643492015-04-26 16:49:23 +01002547 return result;
bellard13eb76e2004-01-24 15:23:36 +00002548}
bellard8df1cd02005-01-28 22:37:22 +00002549
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002550MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2551 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002552{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002553 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002554 hwaddr addr1;
2555 MemoryRegion *mr;
2556 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002557
2558 if (len > 0) {
2559 rcu_read_lock();
2560 l = len;
2561 mr = address_space_translate(as, addr, &addr1, &l, true);
2562 result = address_space_write_continue(as, addr, attrs, buf, len,
2563 addr1, l, mr);
2564 rcu_read_unlock();
2565 }
2566
2567 return result;
2568}
2569
2570/* Called within RCU critical section. */
2571MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2572 MemTxAttrs attrs, uint8_t *buf,
2573 int len, hwaddr addr1, hwaddr l,
2574 MemoryRegion *mr)
2575{
2576 uint8_t *ptr;
2577 uint64_t val;
2578 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002579 bool release_lock = false;
2580
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002581 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002582 if (!memory_access_is_direct(mr, false)) {
2583 /* I/O case */
2584 release_lock |= prepare_mmio_access(mr);
2585 l = memory_access_size(mr, l, addr1);
2586 switch (l) {
2587 case 8:
2588 /* 64 bit read access */
2589 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2590 attrs);
2591 stq_p(buf, val);
2592 break;
2593 case 4:
2594 /* 32 bit read access */
2595 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2596 attrs);
2597 stl_p(buf, val);
2598 break;
2599 case 2:
2600 /* 16 bit read access */
2601 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2602 attrs);
2603 stw_p(buf, val);
2604 break;
2605 case 1:
2606 /* 8 bit read access */
2607 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2608 attrs);
2609 stb_p(buf, val);
2610 break;
2611 default:
2612 abort();
2613 }
2614 } else {
2615 /* RAM case */
2616 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2617 memcpy(buf, ptr, l);
2618 }
2619
2620 if (release_lock) {
2621 qemu_mutex_unlock_iothread();
2622 release_lock = false;
2623 }
2624
2625 len -= l;
2626 buf += l;
2627 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002628
2629 if (!len) {
2630 break;
2631 }
2632
2633 l = len;
2634 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002635 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002636
2637 return result;
2638}
2639
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002640MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2641 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002642{
2643 hwaddr l;
2644 hwaddr addr1;
2645 MemoryRegion *mr;
2646 MemTxResult result = MEMTX_OK;
2647
2648 if (len > 0) {
2649 rcu_read_lock();
2650 l = len;
2651 mr = address_space_translate(as, addr, &addr1, &l, false);
2652 result = address_space_read_continue(as, addr, attrs, buf, len,
2653 addr1, l, mr);
2654 rcu_read_unlock();
2655 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002656
2657 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002658}
2659
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002660MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2661 uint8_t *buf, int len, bool is_write)
2662{
2663 if (is_write) {
2664 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2665 } else {
2666 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2667 }
2668}
Avi Kivityac1970f2012-10-03 16:22:53 +02002669
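/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * a device model might push a buffer through an AddressSpace and check the
 * transaction result.  "example_dma_write" is a hypothetical helper; it only
 * uses the APIs defined above.  Kept under #if 0 so it does not affect the
 * build.
 */
#if 0
static bool example_dma_write(AddressSpace *as, hwaddr addr,
                              const uint8_t *data, int len)
{
    MemTxResult r = address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                                     (uint8_t *)data, len, true);
    return r == MEMTX_OK;   /* MEMTX_OK means the whole access succeeded */
}
#endif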
Avi Kivitya8170e52012-10-23 12:30:10 +02002670void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002671 int len, int is_write)
2672{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002673 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2674 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002675}
2676
Alexander Graf582b55a2013-12-11 14:17:44 +01002677enum write_rom_type {
2678 WRITE_DATA,
2679 FLUSH_CACHE,
2680};
2681
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002682static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002683 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002684{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002685 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002686 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002687 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002688 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002689
Paolo Bonzini41063e12015-03-18 14:21:43 +01002690 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002691 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002692 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002693 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002694
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002695 if (!(memory_region_is_ram(mr) ||
2696 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002697 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002698 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002699 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002700 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002701 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002702 switch (type) {
2703 case WRITE_DATA:
2704 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002705 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002706 break;
2707 case FLUSH_CACHE:
2708 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2709 break;
2710 }
bellardd0ecd2a2006-04-23 17:14:48 +00002711 }
2712 len -= l;
2713 buf += l;
2714 addr += l;
2715 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002716 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002717}
2718
Alexander Graf582b55a2013-12-11 14:17:44 +01002719/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002720void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002721 const uint8_t *buf, int len)
2722{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002723 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002724}
2725
2726void cpu_flush_icache_range(hwaddr start, int len)
2727{
2728 /*
2729 * This function should do the same thing as an icache flush that was
2730 * triggered from within the guest. For TCG we are always cache coherent,
2731 * so there is no need to flush anything. For KVM / Xen we need to flush
2732 * the host's instruction cache at least.
2733 */
2734 if (tcg_enabled()) {
2735 return;
2736 }
2737
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002738 cpu_physical_memory_write_rom_internal(&address_space_memory,
2739 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002740}
2741
aliguori6d16c2f2009-01-22 16:59:11 +00002742typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002743 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002744 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002745 hwaddr addr;
2746 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002747 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002748} BounceBuffer;
2749
2750static BounceBuffer bounce;
2751
aliguoriba223c22009-01-22 16:59:16 +00002752typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002753 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002754 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002755} MapClient;
2756
Fam Zheng38e047b2015-03-16 17:03:35 +08002757QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002758static QLIST_HEAD(map_client_list, MapClient) map_client_list
2759 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002760
Fam Zhenge95205e2015-03-16 17:03:37 +08002761static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002762{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002763 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002764 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002765}
2766
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002767static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002768{
2769 MapClient *client;
2770
Blue Swirl72cf2d42009-09-12 07:36:22 +00002771 while (!QLIST_EMPTY(&map_client_list)) {
2772 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002773 qemu_bh_schedule(client->bh);
2774 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002775 }
2776}
2777
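/*
 * Register a bottom half to be scheduled once the bounce buffer becomes
 * free again; if it is already free, the bottom half is scheduled right
 * away.
 */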
Fam Zhenge95205e2015-03-16 17:03:37 +08002778void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002779{
2780 MapClient *client = g_malloc(sizeof(*client));
2781
Fam Zheng38e047b2015-03-16 17:03:35 +08002782 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002783 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002784 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002785 if (!atomic_read(&bounce.in_use)) {
2786 cpu_notify_map_clients_locked();
2787 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002788 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002789}
2790
Fam Zheng38e047b2015-03-16 17:03:35 +08002791void cpu_exec_init_all(void)
2792{
2793 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002794 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002795 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002796 qemu_mutex_init(&map_client_list_lock);
2797}
2798
Fam Zhenge95205e2015-03-16 17:03:37 +08002799void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002800{
Fam Zhenge95205e2015-03-16 17:03:37 +08002801 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002802
Fam Zhenge95205e2015-03-16 17:03:37 +08002803 qemu_mutex_lock(&map_client_list_lock);
2804 QLIST_FOREACH(client, &map_client_list, link) {
2805 if (client->bh == bh) {
2806 cpu_unregister_map_client_do(client);
2807 break;
2808 }
2809 }
2810 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002811}
2812
2813static void cpu_notify_map_clients(void)
2814{
Fam Zheng38e047b2015-03-16 17:03:35 +08002815 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002816 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002817 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002818}
2819
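/*
 * Check whether an access of len bytes at addr would be accepted by every
 * region it touches, without actually performing it.
 */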
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002820bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2821{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002822 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002823 hwaddr l, xlat;
2824
Paolo Bonzini41063e12015-03-18 14:21:43 +01002825 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002826 while (len > 0) {
2827 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002828 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2829 if (!memory_access_is_direct(mr, is_write)) {
2830 l = memory_access_size(mr, l, addr);
2831 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002832 return false;
2833 }
2834 }
2835
2836 len -= l;
2837 addr += l;
2838 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002839 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002840 return true;
2841}
2842
aliguori6d16c2f2009-01-22 16:59:11 +00002843/* Map a physical memory region into a host virtual address.
2844 * May map a subset of the requested range, given by and returned in *plen.
2845 * May return NULL if resources needed to perform the mapping are exhausted.
2846 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002847 * Use cpu_register_map_client() to know when retrying the map operation is
2848 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002849 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002850void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002851 hwaddr addr,
2852 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002853 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002854{
Avi Kivitya8170e52012-10-23 12:30:10 +02002855 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002856 hwaddr done = 0;
2857 hwaddr l, xlat, base;
2858 MemoryRegion *mr, *this_mr;
2859 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002860 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002861
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002862 if (len == 0) {
2863 return NULL;
2864 }
aliguori6d16c2f2009-01-22 16:59:11 +00002865
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002866 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002867 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002868 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002869
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002870 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002871 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002872 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002873 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002874 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002875 /* Avoid unbounded allocations */
2876 l = MIN(l, TARGET_PAGE_SIZE);
2877 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002878 bounce.addr = addr;
2879 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002880
2881 memory_region_ref(mr);
2882 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002883 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002884 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2885 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002886 }
aliguori6d16c2f2009-01-22 16:59:11 +00002887
Paolo Bonzini41063e12015-03-18 14:21:43 +01002888 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002889 *plen = l;
2890 return bounce.buffer;
2891 }
2892
2893 base = xlat;
2894 raddr = memory_region_get_ram_addr(mr);
2895
2896 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002897 len -= l;
2898 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002899 done += l;
2900 if (len == 0) {
2901 break;
2902 }
2903
2904 l = len;
2905 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2906 if (this_mr != mr || xlat != base + done) {
2907 break;
2908 }
aliguori6d16c2f2009-01-22 16:59:11 +00002909 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002910
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002911 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002912 *plen = done;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002913 ptr = qemu_ram_ptr_length(raddr + base, plen);
2914 rcu_read_unlock();
2915
2916 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002917}
2918
Avi Kivityac1970f2012-10-03 16:22:53 +02002919/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002920 * Will also mark the memory as dirty if is_write == 1. access_len gives
2921 * the amount of memory that was actually read or written by the caller.
2922 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002923void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2924 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002925{
2926 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002927 MemoryRegion *mr;
2928 ram_addr_t addr1;
2929
2930 mr = qemu_ram_addr_from_host(buffer, &addr1);
2931 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002932 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002933 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002934 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002935 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002936 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002937 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002938 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002939 return;
2940 }
2941 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002942 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2943 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002944 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002945 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002946 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002947 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002948 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002949 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002950}
bellardd0ecd2a2006-04-23 17:14:48 +00002951
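/*
 * Illustrative sketch (not part of the original file): the map/unmap pairing
 * expected by the code above.  "example_read_mapped" is a hypothetical
 * caller; note that address_space_map() may shrink *plen and may return NULL
 * while the bounce buffer is busy.  Kept under #if 0 so it does not affect
 * the build.
 */
#if 0
static void example_read_mapped(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    uint8_t *p = address_space_map(as, addr, &plen, false);

    if (!p) {
        return;   /* retry later, e.g. via cpu_register_map_client() */
    }
    /* ... consume the first plen bytes at p ... */
    address_space_unmap(as, p, plen, false, plen);
}
#endif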
Avi Kivitya8170e52012-10-23 12:30:10 +02002952void *cpu_physical_memory_map(hwaddr addr,
2953 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002954 int is_write)
2955{
2956 return address_space_map(&address_space_memory, addr, plen, is_write);
2957}
2958
Avi Kivitya8170e52012-10-23 12:30:10 +02002959void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2960 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002961{
2962 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2963}
2964
bellard8df1cd02005-01-28 22:37:22 +00002965/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002966static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2967 MemTxAttrs attrs,
2968 MemTxResult *result,
2969 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002970{
bellard8df1cd02005-01-28 22:37:22 +00002971 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002972 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002973 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002974 hwaddr l = 4;
2975 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002976 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002977 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002978
Paolo Bonzini41063e12015-03-18 14:21:43 +01002979 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002980 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002981 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002982 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002983
bellard8df1cd02005-01-28 22:37:22 +00002984 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002985 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002986#if defined(TARGET_WORDS_BIGENDIAN)
2987 if (endian == DEVICE_LITTLE_ENDIAN) {
2988 val = bswap32(val);
2989 }
2990#else
2991 if (endian == DEVICE_BIG_ENDIAN) {
2992 val = bswap32(val);
2993 }
2994#endif
bellard8df1cd02005-01-28 22:37:22 +00002995 } else {
2996 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002997 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002998 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002999 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003000 switch (endian) {
3001 case DEVICE_LITTLE_ENDIAN:
3002 val = ldl_le_p(ptr);
3003 break;
3004 case DEVICE_BIG_ENDIAN:
3005 val = ldl_be_p(ptr);
3006 break;
3007 default:
3008 val = ldl_p(ptr);
3009 break;
3010 }
Peter Maydell50013112015-04-26 16:49:24 +01003011 r = MEMTX_OK;
3012 }
3013 if (result) {
3014 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003015 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003016 if (release_lock) {
3017 qemu_mutex_unlock_iothread();
3018 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003019 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003020 return val;
3021}
3022
Peter Maydell50013112015-04-26 16:49:24 +01003023uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3024 MemTxAttrs attrs, MemTxResult *result)
3025{
3026 return address_space_ldl_internal(as, addr, attrs, result,
3027 DEVICE_NATIVE_ENDIAN);
3028}
3029
3030uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3031 MemTxAttrs attrs, MemTxResult *result)
3032{
3033 return address_space_ldl_internal(as, addr, attrs, result,
3034 DEVICE_LITTLE_ENDIAN);
3035}
3036
3037uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3038 MemTxAttrs attrs, MemTxResult *result)
3039{
3040 return address_space_ldl_internal(as, addr, attrs, result,
3041 DEVICE_BIG_ENDIAN);
3042}
3043
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003044uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003045{
Peter Maydell50013112015-04-26 16:49:24 +01003046 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003047}
3048
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003049uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003050{
Peter Maydell50013112015-04-26 16:49:24 +01003051 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003052}
3053
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003054uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003055{
Peter Maydell50013112015-04-26 16:49:24 +01003056 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003057}
3058
bellard84b7b8e2005-11-28 21:19:04 +00003059/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003060static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3061 MemTxAttrs attrs,
3062 MemTxResult *result,
3063 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003064{
bellard84b7b8e2005-11-28 21:19:04 +00003065 uint8_t *ptr;
3066 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003067 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003068 hwaddr l = 8;
3069 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003070 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003071 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003072
Paolo Bonzini41063e12015-03-18 14:21:43 +01003073 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003074 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003075 false);
3076 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003077 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003078
bellard84b7b8e2005-11-28 21:19:04 +00003079 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003080 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003081#if defined(TARGET_WORDS_BIGENDIAN)
3082 if (endian == DEVICE_LITTLE_ENDIAN) {
3083 val = bswap64(val);
3084 }
3085#else
3086 if (endian == DEVICE_BIG_ENDIAN) {
3087 val = bswap64(val);
3088 }
3089#endif
bellard84b7b8e2005-11-28 21:19:04 +00003090 } else {
3091 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003092 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003093 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003094 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003095 switch (endian) {
3096 case DEVICE_LITTLE_ENDIAN:
3097 val = ldq_le_p(ptr);
3098 break;
3099 case DEVICE_BIG_ENDIAN:
3100 val = ldq_be_p(ptr);
3101 break;
3102 default:
3103 val = ldq_p(ptr);
3104 break;
3105 }
Peter Maydell50013112015-04-26 16:49:24 +01003106 r = MEMTX_OK;
3107 }
3108 if (result) {
3109 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003110 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003111 if (release_lock) {
3112 qemu_mutex_unlock_iothread();
3113 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003114 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003115 return val;
3116}
3117
Peter Maydell50013112015-04-26 16:49:24 +01003118uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3119 MemTxAttrs attrs, MemTxResult *result)
3120{
3121 return address_space_ldq_internal(as, addr, attrs, result,
3122 DEVICE_NATIVE_ENDIAN);
3123}
3124
3125uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3126 MemTxAttrs attrs, MemTxResult *result)
3127{
3128 return address_space_ldq_internal(as, addr, attrs, result,
3129 DEVICE_LITTLE_ENDIAN);
3130}
3131
3132uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3133 MemTxAttrs attrs, MemTxResult *result)
3134{
3135 return address_space_ldq_internal(as, addr, attrs, result,
3136 DEVICE_BIG_ENDIAN);
3137}
3138
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003139uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003140{
Peter Maydell50013112015-04-26 16:49:24 +01003141 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003142}
3143
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003144uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003145{
Peter Maydell50013112015-04-26 16:49:24 +01003146 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003147}
3148
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003149uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003150{
Peter Maydell50013112015-04-26 16:49:24 +01003151 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003152}
3153
bellardaab33092005-10-30 20:48:42 +00003154/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003155uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3156 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003157{
3158 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003159 MemTxResult r;
3160
3161 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3162 if (result) {
3163 *result = r;
3164 }
bellardaab33092005-10-30 20:48:42 +00003165 return val;
3166}
3167
Peter Maydell50013112015-04-26 16:49:24 +01003168uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3169{
3170 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3171}
3172
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003173/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003174static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3175 hwaddr addr,
3176 MemTxAttrs attrs,
3177 MemTxResult *result,
3178 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003179{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003180 uint8_t *ptr;
3181 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003182 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003183 hwaddr l = 2;
3184 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003185 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003186 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003187
Paolo Bonzini41063e12015-03-18 14:21:43 +01003188 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003189 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003190 false);
3191 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003192 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003193
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003194 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003195 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003196#if defined(TARGET_WORDS_BIGENDIAN)
3197 if (endian == DEVICE_LITTLE_ENDIAN) {
3198 val = bswap16(val);
3199 }
3200#else
3201 if (endian == DEVICE_BIG_ENDIAN) {
3202 val = bswap16(val);
3203 }
3204#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003205 } else {
3206 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003207 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003208 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003209 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003210 switch (endian) {
3211 case DEVICE_LITTLE_ENDIAN:
3212 val = lduw_le_p(ptr);
3213 break;
3214 case DEVICE_BIG_ENDIAN:
3215 val = lduw_be_p(ptr);
3216 break;
3217 default:
3218 val = lduw_p(ptr);
3219 break;
3220 }
Peter Maydell50013112015-04-26 16:49:24 +01003221 r = MEMTX_OK;
3222 }
3223 if (result) {
3224 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003225 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003226 if (release_lock) {
3227 qemu_mutex_unlock_iothread();
3228 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003229 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003230 return val;
bellardaab33092005-10-30 20:48:42 +00003231}
3232
Peter Maydell50013112015-04-26 16:49:24 +01003233uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3234 MemTxAttrs attrs, MemTxResult *result)
3235{
3236 return address_space_lduw_internal(as, addr, attrs, result,
3237 DEVICE_NATIVE_ENDIAN);
3238}
3239
3240uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3241 MemTxAttrs attrs, MemTxResult *result)
3242{
3243 return address_space_lduw_internal(as, addr, attrs, result,
3244 DEVICE_LITTLE_ENDIAN);
3245}
3246
3247uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3248 MemTxAttrs attrs, MemTxResult *result)
3249{
3250 return address_space_lduw_internal(as, addr, attrs, result,
3251 DEVICE_BIG_ENDIAN);
3252}
3253
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003254uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003255{
Peter Maydell50013112015-04-26 16:49:24 +01003256 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003257}
3258
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003259uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003260{
Peter Maydell50013112015-04-26 16:49:24 +01003261 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003262}
3263
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003264uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003265{
Peter Maydell50013112015-04-26 16:49:24 +01003266 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003267}
3268
bellard8df1cd02005-01-28 22:37:22 +00003269/* warning: addr must be aligned. The RAM page is not marked as dirty
3270   and the code inside is not invalidated. This is useful if the dirty
3271 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003272void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3273 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003274{
bellard8df1cd02005-01-28 22:37:22 +00003275 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003276 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003277 hwaddr l = 4;
3278 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003279 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003280 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003281 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003282
Paolo Bonzini41063e12015-03-18 14:21:43 +01003283 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003284 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003285 true);
3286 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003287 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003288
Peter Maydell50013112015-04-26 16:49:24 +01003289 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003290 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003291 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003292 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003293 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003294
Paolo Bonzini845b6212015-03-23 11:45:53 +01003295 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3296 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003297 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003298 r = MEMTX_OK;
3299 }
3300 if (result) {
3301 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003302 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003303 if (release_lock) {
3304 qemu_mutex_unlock_iothread();
3305 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003306 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003307}
3308
Peter Maydell50013112015-04-26 16:49:24 +01003309void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3310{
3311 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3312}
3313
bellard8df1cd02005-01-28 22:37:22 +00003314/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003315static inline void address_space_stl_internal(AddressSpace *as,
3316 hwaddr addr, uint32_t val,
3317 MemTxAttrs attrs,
3318 MemTxResult *result,
3319 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003320{
bellard8df1cd02005-01-28 22:37:22 +00003321 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003322 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003323 hwaddr l = 4;
3324 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003325 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003326 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003327
Paolo Bonzini41063e12015-03-18 14:21:43 +01003328 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003329 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003330 true);
3331 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003332 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003333
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003334#if defined(TARGET_WORDS_BIGENDIAN)
3335 if (endian == DEVICE_LITTLE_ENDIAN) {
3336 val = bswap32(val);
3337 }
3338#else
3339 if (endian == DEVICE_BIG_ENDIAN) {
3340 val = bswap32(val);
3341 }
3342#endif
Peter Maydell50013112015-04-26 16:49:24 +01003343 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003344 } else {
bellard8df1cd02005-01-28 22:37:22 +00003345 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003346 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003347 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003348 switch (endian) {
3349 case DEVICE_LITTLE_ENDIAN:
3350 stl_le_p(ptr, val);
3351 break;
3352 case DEVICE_BIG_ENDIAN:
3353 stl_be_p(ptr, val);
3354 break;
3355 default:
3356 stl_p(ptr, val);
3357 break;
3358 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003359 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003360 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003361 }
Peter Maydell50013112015-04-26 16:49:24 +01003362 if (result) {
3363 *result = r;
3364 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003365 if (release_lock) {
3366 qemu_mutex_unlock_iothread();
3367 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003368 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003369}
3370
3371void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3372 MemTxAttrs attrs, MemTxResult *result)
3373{
3374 address_space_stl_internal(as, addr, val, attrs, result,
3375 DEVICE_NATIVE_ENDIAN);
3376}
3377
3378void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3379 MemTxAttrs attrs, MemTxResult *result)
3380{
3381 address_space_stl_internal(as, addr, val, attrs, result,
3382 DEVICE_LITTLE_ENDIAN);
3383}
3384
3385void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3386 MemTxAttrs attrs, MemTxResult *result)
3387{
3388 address_space_stl_internal(as, addr, val, attrs, result,
3389 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003390}
3391
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003392void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003393{
Peter Maydell50013112015-04-26 16:49:24 +01003394 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003395}
3396
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003397void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003398{
Peter Maydell50013112015-04-26 16:49:24 +01003399 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003400}
3401
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003402void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003403{
Peter Maydell50013112015-04-26 16:49:24 +01003404 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003405}
3406
bellardaab33092005-10-30 20:48:42 +00003407/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003408void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3409 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003410{
3411 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003412 MemTxResult r;
3413
3414 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3415 if (result) {
3416 *result = r;
3417 }
3418}
3419
3420void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3421{
3422 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003423}
3424
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003425/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003426static inline void address_space_stw_internal(AddressSpace *as,
3427 hwaddr addr, uint32_t val,
3428 MemTxAttrs attrs,
3429 MemTxResult *result,
3430 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003431{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003432 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003433 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003434 hwaddr l = 2;
3435 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003436 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003437 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003438
Paolo Bonzini41063e12015-03-18 14:21:43 +01003439 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003440 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003441 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003442 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003443
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003444#if defined(TARGET_WORDS_BIGENDIAN)
3445 if (endian == DEVICE_LITTLE_ENDIAN) {
3446 val = bswap16(val);
3447 }
3448#else
3449 if (endian == DEVICE_BIG_ENDIAN) {
3450 val = bswap16(val);
3451 }
3452#endif
Peter Maydell50013112015-04-26 16:49:24 +01003453 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003454 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003455 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003456 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003457 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003458 switch (endian) {
3459 case DEVICE_LITTLE_ENDIAN:
3460 stw_le_p(ptr, val);
3461 break;
3462 case DEVICE_BIG_ENDIAN:
3463 stw_be_p(ptr, val);
3464 break;
3465 default:
3466 stw_p(ptr, val);
3467 break;
3468 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003469 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003470 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003471 }
Peter Maydell50013112015-04-26 16:49:24 +01003472 if (result) {
3473 *result = r;
3474 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003475 if (release_lock) {
3476 qemu_mutex_unlock_iothread();
3477 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003478 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003479}
3480
3481void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3482 MemTxAttrs attrs, MemTxResult *result)
3483{
3484 address_space_stw_internal(as, addr, val, attrs, result,
3485 DEVICE_NATIVE_ENDIAN);
3486}
3487
3488void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3489 MemTxAttrs attrs, MemTxResult *result)
3490{
3491 address_space_stw_internal(as, addr, val, attrs, result,
3492 DEVICE_LITTLE_ENDIAN);
3493}
3494
3495void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3496 MemTxAttrs attrs, MemTxResult *result)
3497{
3498 address_space_stw_internal(as, addr, val, attrs, result,
3499 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003500}
3501
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003502void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003503{
Peter Maydell50013112015-04-26 16:49:24 +01003504 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003505}
3506
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003507void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003508{
Peter Maydell50013112015-04-26 16:49:24 +01003509 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003510}
3511
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003512void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003513{
Peter Maydell50013112015-04-26 16:49:24 +01003514 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003515}
3516
bellardaab33092005-10-30 20:48:42 +00003517/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003518void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3519 MemTxAttrs attrs, MemTxResult *result)
3520{
3521 MemTxResult r;
3522 val = tswap64(val);
3523 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3524 if (result) {
3525 *result = r;
3526 }
3527}
3528
3529void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3530 MemTxAttrs attrs, MemTxResult *result)
3531{
3532 MemTxResult r;
3533 val = cpu_to_le64(val);
3534 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3535 if (result) {
3536 *result = r;
3537 }
3538}

3539void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3540 MemTxAttrs attrs, MemTxResult *result)
3541{
3542 MemTxResult r;
3543 val = cpu_to_be64(val);
3544 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3545 if (result) {
3546 *result = r;
3547 }
3548}
3549
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003550void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003551{
Peter Maydell50013112015-04-26 16:49:24 +01003552 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003553}
3554
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003555void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003556{
Peter Maydell50013112015-04-26 16:49:24 +01003557 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003558}
3559
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003560void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003561{
Peter Maydell50013112015-04-26 16:49:24 +01003562 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003563}
3564
aliguori5e2972f2009-03-28 17:51:36 +00003565/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003566int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003567 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003568{
3569 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003570 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003571 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003572
3573 while (len > 0) {
3574 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003575 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003576 /* if no physical page mapped, return an error */
3577 if (phys_addr == -1)
3578 return -1;
3579 l = (page + TARGET_PAGE_SIZE) - addr;
3580 if (l > len)
3581 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003582 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003583 if (is_write) {
3584 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3585 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003586 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3587 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003588 }
bellard13eb76e2004-01-24 15:23:36 +00003589 len -= l;
3590 buf += l;
3591 addr += l;
3592 }
3593 return 0;
3594}
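/*
 * Usage sketch: this is the accessor typically used by gdbstub-style debug
 * code to touch guest memory by virtual address.  It translates one page at
 * a time with cpu_get_phys_page_debug() and, for writes, goes through
 * cpu_physical_memory_write_rom() so that even ROM-backed pages can be
 * patched (e.g. to plant a software breakpoint).  Illustrative call,
 * assuming "cpu" is a valid CPUState and "vaddr" is mapped in the guest:
 *
 *     uint32_t insn;
 *     if (cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)&insn,
 *                             sizeof(insn), 0) < 0) {
 *         // address not mapped in the guest
 *     }
 */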
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003595
3596/*
3597 * Allows code that needs to deal with migration bitmaps etc. to still be
3598 * built target-independent.
3599 */
3600size_t qemu_target_page_bits(void)
3601{
3602 return TARGET_PAGE_BITS;
3603}
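/*
 * A target-independent caller can recover the page size and mask from the
 * bit count, e.g. (sketch; "offset" is a hypothetical value):
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     size_t page_mask = page_size - 1;
 *     bool aligned = (offset & page_mask) == 0;
 */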
3604
Paul Brooka68fe892010-03-01 00:08:59 +00003605#endif
bellard13eb76e2004-01-24 15:23:36 +00003606
Blue Swirl8e4a4242013-01-06 18:30:17 +00003607/*
3608 * A helper function for the _utterly broken_ virtio device model to find out if
3609 * it's running on a big endian machine. Don't do this at home kids!
3610 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003611bool target_words_bigendian(void);
3612bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003613{
3614#if defined(TARGET_WORDS_BIGENDIAN)
3615 return true;
3616#else
3617 return false;
3618#endif
3619}
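/*
 * Illustrative use (not from this file): legacy virtio code needs to know
 * the guest-visible byte order of the target at run time, e.g.:
 *
 *     if (target_words_bigendian()) {
 *         val = bswap16(val);   // hypothetical fix-up of a little-endian value
 *     }
 *
 * Per the warning above, new code should prefer explicit endian accessors
 * over branching on this predicate.
 */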
3620
Wen Congyang76f35532012-05-07 12:04:18 +08003621#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003622bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003623{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003624 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003625 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003626 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003627
Paolo Bonzini41063e12015-03-18 14:21:43 +01003628 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003629 mr = address_space_translate(&address_space_memory,
3630 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003631
Paolo Bonzini41063e12015-03-18 14:21:43 +01003632 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3633 rcu_read_unlock();
3634 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003635}
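/*
 * Usage sketch: code that walks guest-physical ranges (e.g. crash-dump
 * support) can use this predicate to skip device regions and only copy
 * real memory contents.  Illustrative loop, names hypothetical:
 *
 *     hwaddr a;
 *     for (a = start; a < start + size; a += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_is_io(a)) {
 *             continue;   // MMIO, nothing meaningful to copy
 *         }
 *         cpu_physical_memory_read(a, buf, TARGET_PAGE_SIZE);
 *     }
 */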
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003636
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003637int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003638{
3639 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003640 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003641
Mike Day0dc3f442013-09-05 14:41:35 -04003642 rcu_read_lock();
3643 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003644 ret = func(block->idstr, block->host, block->offset,
3645 block->used_length, opaque);
3646 if (ret) {
3647 break;
3648 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003649 }
Mike Day0dc3f442013-09-05 14:41:35 -04003650 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003651 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003652}
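/*
 * Usage sketch: a RAMBlockIterFunc callback receives each block's idstr,
 * host pointer, offset and used length, and a non-zero return value stops
 * the walk early.  Hypothetical example that just sums up guest RAM:
 *
 *     static int sum_block(const char *idstr, void *host_addr,
 *                          ram_addr_t offset, ram_addr_t length,
 *                          void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(sum_block, &total);
 */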
Peter Maydellec3f8c92013-06-27 20:53:38 +01003653#endif