/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file.
 */
#define RAM_FILE (1 << 3)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

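/* Well-known indices into a PhysPageMap's sections[] table; the iotlb
 * code below (memory_region_section_get_iotlb) encodes them directly
 * into iotlb values.
 */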
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

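/* Make sure the node array has room for at least @nodes more entries,
 * growing it geometrically when necessary.
 */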
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

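/* Allocate one radix-tree node and initialise every entry in it: leaf
 * nodes start out pointing at the unassigned section, interior nodes
 * at PHYS_MAP_NODE_NIL.
 */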
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

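/* Fill [*index, *index + *nb) pages with @leaf, descending one radix
 * tree level per recursive call and allocating intermediate nodes on
 * demand.
 */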
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

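/* Compact the whole tree, starting from the root entry (see above). */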
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

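/* Walk the radix tree from @lp, honouring the per-entry skip counts,
 * and return the MemoryRegionSection covering @addr (or the
 * unassigned section if there is none).
 */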
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment. */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

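/* Reserve and return the lowest unused cpu_index, or fail when all
 * MAX_CPUMASK_BITS indices are already taken.
 */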
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock. This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here. The block was already published
     * when it was placed into the list. Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

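/* Called after dirty bits have been cleared so that the TCG TLBs start
 * trapping writes to [start, start + length) again; the range must lie
 * within a single RAMBlock (see the assert below).
 */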
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

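/* Append @section to the map's section table and return its index,
 * growing the table as needed and taking a reference on the region.
 */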
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries. Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

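/* Register a section that does not cover whole target pages: install
 * (or reuse) a subpage container for the page and record the section
 * inside it.
 */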
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

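/* Listener callback for a new section: split it into an unaligned head
 * and tail handled via subpages and a page-aligned middle handled by
 * register_multipage.
 */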
Avi Kivityac1970f2012-10-03 16:22:53 +02001127static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001128{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001129 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001130 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001131 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001132 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001133
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001134 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1135 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1136 - now.offset_within_address_space;
1137
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001138 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001139 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001140 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001141 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001142 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 while (int128_ne(remain.size, now.size)) {
1144 remain.size = int128_sub(remain.size, now.size);
1145 remain.offset_within_address_space += int128_get64(now.size);
1146 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001147 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001148 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001149 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001150 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001151 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001152 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001153 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001154 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001155 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001156 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001157 }
1158}
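/* Added note (not in the original file): mem_add() above splits each
 * MemoryRegionSection into an unaligned head, a run of whole pages and an
 * unaligned tail.  As a purely illustrative example, assuming
 * TARGET_PAGE_SIZE == 0x1000, a section covering [0x0800, 0x3400) would be
 * registered roughly as:
 *
 *     [0x0800, 0x1000)  -> register_subpage()    (head, shares a page)
 *     [0x1000, 0x3000)  -> register_multipage()  (whole pages)
 *     [0x3000, 0x3400)  -> register_subpage()    (tail, shares a page)
 *
 * The addresses are made up for illustration only.
 */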
1159
Sheng Yang62a27442010-01-26 19:21:16 +08001160void qemu_flush_coalesced_mmio_buffer(void)
1161{
1162 if (kvm_enabled())
1163 kvm_flush_coalesced_mmio_buffer();
1164}
1165
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001166void qemu_mutex_lock_ramlist(void)
1167{
1168 qemu_mutex_lock(&ram_list.mutex);
1169}
1170
1171void qemu_mutex_unlock_ramlist(void)
1172{
1173 qemu_mutex_unlock(&ram_list.mutex);
1174}
1175
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001176#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001177
1178#include <sys/vfs.h>
1179
1180#define HUGETLBFS_MAGIC 0x958458f6
1181
Hu Taofc7a5802014-09-09 13:28:01 +08001182static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001183{
1184 struct statfs fs;
1185 int ret;
1186
1187 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001188 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001189 } while (ret != 0 && errno == EINTR);
1190
1191 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001192 error_setg_errno(errp, errno, "failed to get page size of file %s",
1193 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001194 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001195 }
1196
1197 if (fs.f_type != HUGETLBFS_MAGIC) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001198 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199
1200 return fs.f_bsize;
1201}
1202
Alex Williamson04b16652010-07-02 11:13:17 -06001203static void *file_ram_alloc(RAMBlock *block,
1204 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001205 const char *path,
1206 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001207{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001208 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001209 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001210 char *sanitized_name;
1211 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001212 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001213 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001214 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001215 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001216
Hu Taofc7a5802014-09-09 13:28:01 +08001217 hpagesize = gethugepagesize(path, &local_err);
1218 if (local_err) {
1219 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001220 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001221 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001222 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001223
1224 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001225 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1226 "or larger than huge page size 0x%" PRIx64,
1227 memory, hpagesize);
1228 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001229 }
1230
1231 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001232 error_setg(errp,
1233 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001234 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001235 }
1236
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001237 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1238 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1239 sanitized_name = g_strdup(memory_region_name(block->mr));
1240 for (c = sanitized_name; *c != '\0'; c++) {
1241 if (*c == '/') {
1242 *c = '_';
1243 }
1244 }
1245
1246 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1247 sanitized_name);
1248 g_free(sanitized_name);
1249
1250 fd = mkstemp(filename);
1251 if (fd >= 0) {
1252 unlink(filename);
1253 }
1254 g_free(filename);
1255 } else {
1256 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001257 }
1258
Marcelo Tosattic9027602010-03-01 20:25:08 -03001259 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001260 error_setg_errno(errp, errno,
1261 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001262 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001263 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001264
Chen Hanxiao9284f312015-07-24 11:12:03 +08001265 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001266
1267 /*
1268 * ftruncate is not supported by hugetlbfs in older
1269 * hosts, so don't bother bailing out on errors.
1270 * If anything goes wrong with it under other filesystems,
1271 * mmap will fail.
1272 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001273 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001274 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001275 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001276
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001277 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001278 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001279 error_setg_errno(errp, errno,
1280 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001281 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001282 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001283 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001284
1285 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001286 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001287 }
1288
Alex Williamson04b16652010-07-02 11:13:17 -06001289 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001290 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001291
1292error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001293 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001294}
1295#endif
1296
Mike Day0dc3f442013-09-05 14:41:35 -04001297/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001298static ram_addr_t find_ram_offset(ram_addr_t size)
1299{
Alex Williamson04b16652010-07-02 11:13:17 -06001300 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001301 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001302
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001303 assert(size != 0); /* it would hand out same offset multiple times */
1304
Mike Day0dc3f442013-09-05 14:41:35 -04001305 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001306 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001307 }
Alex Williamson04b16652010-07-02 11:13:17 -06001308
Mike Day0dc3f442013-09-05 14:41:35 -04001309 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001310 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001311
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001312 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001313
Mike Day0dc3f442013-09-05 14:41:35 -04001314 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001315 if (next_block->offset >= end) {
1316 next = MIN(next, next_block->offset);
1317 }
1318 }
1319 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001320 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001321 mingap = next - end;
1322 }
1323 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001324
1325 if (offset == RAM_ADDR_MAX) {
1326 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1327 (uint64_t)size);
1328 abort();
1329 }
1330
Alex Williamson04b16652010-07-02 11:13:17 -06001331 return offset;
1332}
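/* Added note (not in the original file): find_ram_offset() returns the start
 * of the smallest gap between existing blocks that still fits the request.
 * As a hypothetical example, with blocks occupying [0, 4M) and [8M, 12M), a
 * 2M request returns offset 4M: the 4M-wide gap between the blocks is smaller
 * than the unbounded space above 12M, so it is preferred.
 */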
1333
Juan Quintela652d7ec2012-07-20 10:37:54 +02001334ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001335{
Alex Williamsond17b5282010-06-25 11:08:38 -06001336 RAMBlock *block;
1337 ram_addr_t last = 0;
1338
Mike Day0dc3f442013-09-05 14:41:35 -04001339 rcu_read_lock();
1340 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001341 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001342 }
Mike Day0dc3f442013-09-05 14:41:35 -04001343 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001344 return last;
1345}
1346
Jason Baronddb97f12012-08-02 15:44:16 -04001347static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1348{
1349 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001350
1351 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in a core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001352 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001353 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1354 if (ret) {
1355 perror("qemu_madvise");
1356 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1357 "but dump_guest_core=off specified\n");
1358 }
1359 }
1360}
1361
Mike Day0dc3f442013-09-05 14:41:35 -04001362/* Called within an RCU critical section, or while the ramlist lock
1363 * is held.
1364 */
Hu Tao20cfe882014-04-02 15:13:26 +08001365static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001366{
Hu Tao20cfe882014-04-02 15:13:26 +08001367 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001368
Mike Day0dc3f442013-09-05 14:41:35 -04001369 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001370 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001371 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001372 }
1373 }
Hu Tao20cfe882014-04-02 15:13:26 +08001374
1375 return NULL;
1376}
1377
Mike Dayae3a7042013-09-05 14:41:35 -04001378/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001379void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1380{
Mike Dayae3a7042013-09-05 14:41:35 -04001381 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001382
Mike Day0dc3f442013-09-05 14:41:35 -04001383 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001384 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001385 assert(new_block);
1386 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001387
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001388 if (dev) {
1389 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001390 if (id) {
1391 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001392 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001393 }
1394 }
1395 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1396
Mike Day0dc3f442013-09-05 14:41:35 -04001397 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001398 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001399 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1400 new_block->idstr);
1401 abort();
1402 }
1403 }
Mike Day0dc3f442013-09-05 14:41:35 -04001404 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001405}
1406
Mike Dayae3a7042013-09-05 14:41:35 -04001407/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001408void qemu_ram_unset_idstr(ram_addr_t addr)
1409{
Mike Dayae3a7042013-09-05 14:41:35 -04001410 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001411
Mike Dayae3a7042013-09-05 14:41:35 -04001412 /* FIXME: arch_init.c assumes that this is not called throughout
1413 * migration. Ignore the problem since hot-unplug during migration
1414 * does not work anyway.
1415 */
1416
Mike Day0dc3f442013-09-05 14:41:35 -04001417 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001418 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001419 if (block) {
1420 memset(block->idstr, 0, sizeof(block->idstr));
1421 }
Mike Day0dc3f442013-09-05 14:41:35 -04001422 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001423}
1424
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001425static int memory_try_enable_merging(void *addr, size_t len)
1426{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001427 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001428 /* disabled by the user */
1429 return 0;
1430 }
1431
1432 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1433}
1434
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001435/* Only legal before guest might have detected the memory size: e.g. on
1436 * incoming migration, or right after reset.
1437 *
1438 * As the memory core doesn't know how memory is accessed, it is up to
1439 * the resize callback to update device state and/or add assertions to detect
1440 * misuse, if necessary.
1441 */
1442int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1443{
1444 RAMBlock *block = find_ram_block(base);
1445
1446 assert(block);
1447
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001448 newsize = TARGET_PAGE_ALIGN(newsize);
1449
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001450 if (block->used_length == newsize) {
1451 return 0;
1452 }
1453
1454 if (!(block->flags & RAM_RESIZEABLE)) {
1455 error_setg_errno(errp, EINVAL,
1456 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1457 " in != 0x" RAM_ADDR_FMT, block->idstr,
1458 newsize, block->used_length);
1459 return -EINVAL;
1460 }
1461
1462 if (block->max_length < newsize) {
1463 error_setg_errno(errp, EINVAL,
1464 "Length too large: %s: 0x" RAM_ADDR_FMT
1465 " > 0x" RAM_ADDR_FMT, block->idstr,
1466 newsize, block->max_length);
1467 return -EINVAL;
1468 }
1469
1470 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1471 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001472 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1473 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001474 memory_region_set_size(block->mr, newsize);
1475 if (block->resized) {
1476 block->resized(block->idstr, newsize, block->host);
1477 }
1478 return 0;
1479}
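/* Illustrative sketch (not in the original file): a block created with
 * RAM_RESIZEABLE (see qemu_ram_alloc_resizeable() below) could be grown, for
 * instance on incoming migration, roughly like this:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_offset, new_size, &err) < 0) {
 *         ... report and free err ...
 *     }
 *
 * block_offset and new_size are hypothetical values; the call fails for
 * fixed-size blocks or when new_size exceeds max_length, as checked above.
 */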
1480
Hu Taoef701d72014-09-09 13:27:54 +08001481static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001482{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001483 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001484 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001485 ram_addr_t old_ram_size, new_ram_size;
1486
1487 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001488
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001489 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001490 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001491
1492 if (!new_block->host) {
1493 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001494 xen_ram_alloc(new_block->offset, new_block->max_length,
1495 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001496 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001497 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001498 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001499 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001500 error_setg_errno(errp, errno,
1501 "cannot set up guest memory '%s'",
1502 memory_region_name(new_block->mr));
1503 qemu_mutex_unlock_ramlist();
1504 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001505 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001506 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001507 }
1508 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001509
Li Zhijiandd631692015-07-02 20:18:06 +08001510 new_ram_size = MAX(old_ram_size,
1511 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1512 if (new_ram_size > old_ram_size) {
1513 migration_bitmap_extend(old_ram_size, new_ram_size);
1514 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001515 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1516 * QLIST (which has an RCU-friendly variant) does not have insertion at
1517 * tail, so save the last element in last_block.
1518 */
Mike Day0dc3f442013-09-05 14:41:35 -04001519 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001520 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001521 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001522 break;
1523 }
1524 }
1525 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001526 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001527 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001528 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001529 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001530 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001531 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001532 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001533
Mike Day0dc3f442013-09-05 14:41:35 -04001534 /* Write list before version */
1535 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001536 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001537 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001538
Juan Quintela2152f5c2013-10-08 13:52:02 +02001539 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1540
1541 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001542 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001543
1544 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001545 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1546 ram_list.dirty_memory[i] =
1547 bitmap_zero_extend(ram_list.dirty_memory[i],
1548 old_ram_size, new_ram_size);
1549 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001550 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001551 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001552 new_block->used_length,
1553 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001554
Paolo Bonzinia904c912015-01-21 16:18:35 +01001555 if (new_block->host) {
1556 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1557 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1558 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1559 if (kvm_enabled()) {
1560 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1561 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001562 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001563
1564 return new_block->offset;
1565}
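/* Added note (not in the original file): ram_block_add() publishes the new
 * block with QLIST_INSERT_*_RCU and only increments ram_list.version after
 * smp_wmb(), so a reader that sees the new version also sees the new block.
 * A typical reader-side sketch:
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         ... inspect block ...
 *     }
 *     rcu_read_unlock();
 */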
1566
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001567#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001568ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001569 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001570 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001571{
1572 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001573 ram_addr_t addr;
1574 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001575
1576 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001577 error_setg(errp, "-mem-path not supported with Xen");
1578 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001579 }
1580
1581 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1582 /*
1583 * file_ram_alloc() needs to allocate just like
1584 * phys_mem_alloc, but we haven't bothered to provide
1585 * a hook there.
1586 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001587 error_setg(errp,
1588 "-mem-path not supported with this accelerator");
1589 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001590 }
1591
1592 size = TARGET_PAGE_ALIGN(size);
1593 new_block = g_malloc0(sizeof(*new_block));
1594 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001595 new_block->used_length = size;
1596 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001597 new_block->flags = share ? RAM_SHARED : 0;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001598 new_block->flags |= RAM_FILE;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001599 new_block->host = file_ram_alloc(new_block, size,
1600 mem_path, errp);
1601 if (!new_block->host) {
1602 g_free(new_block);
1603 return -1;
1604 }
1605
Hu Taoef701d72014-09-09 13:27:54 +08001606 addr = ram_block_add(new_block, &local_err);
1607 if (local_err) {
1608 g_free(new_block);
1609 error_propagate(errp, local_err);
1610 return -1;
1611 }
1612 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001613}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001614#endif
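/* Illustrative sketch (not in the original file): hugetlbfs-backed memory
 * backends end up in qemu_ram_alloc_from_file() above; a hypothetical caller
 * would look roughly like:
 *
 *     Error *err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc_from_file(size, mr, false,
 *                                                "/dev/hugepages", &err);
 *
 * The path and share=false are made-up example values; on failure the
 * function returns -1 and fills *errp, as implemented above.
 */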
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001615
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001616static
1617ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1618 void (*resized)(const char*,
1619 uint64_t length,
1620 void *host),
1621 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001622 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001623{
1624 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001625 ram_addr_t addr;
1626 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001627
1628 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001629 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001630 new_block = g_malloc0(sizeof(*new_block));
1631 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001632 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001633 new_block->used_length = size;
1634 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001635 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636 new_block->fd = -1;
1637 new_block->host = host;
1638 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001639 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001640 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001641 if (resizeable) {
1642 new_block->flags |= RAM_RESIZEABLE;
1643 }
Hu Taoef701d72014-09-09 13:27:54 +08001644 addr = ram_block_add(new_block, &local_err);
1645 if (local_err) {
1646 g_free(new_block);
1647 error_propagate(errp, local_err);
1648 return -1;
1649 }
1650 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001651}
1652
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001653ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1654 MemoryRegion *mr, Error **errp)
1655{
1656 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1657}
1658
Hu Taoef701d72014-09-09 13:27:54 +08001659ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001660{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001661 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1662}
1663
1664ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1665 void (*resized)(const char*,
1666 uint64_t length,
1667 void *host),
1668 MemoryRegion *mr, Error **errp)
1669{
1670 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001671}
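/* Added note (not in the original file): the three public allocators above
 * are thin wrappers around qemu_ram_alloc_internal():
 *
 *     qemu_ram_alloc(size, mr, errp);                      anonymous RAM
 *     qemu_ram_alloc_from_ptr(size, host, mr, errp);       caller-provided host memory
 *     qemu_ram_alloc_resizeable(size, maxsz, resized_cb,
 *                               mr, errp);                 growable up to maxsz
 *
 * resized_cb is a hypothetical callback name; qemu_ram_resize() invokes it
 * whenever a RAM_RESIZEABLE block changes size.
 */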
bellarde9a1ab12007-02-08 23:08:38 +00001672
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001673void qemu_ram_free_from_ptr(ram_addr_t addr)
1674{
1675 RAMBlock *block;
1676
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001677 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001678 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001679 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001680 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001681 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001682 /* Write list before version */
1683 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001684 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001685 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001686 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001687 }
1688 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001689 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001690}
1691
Paolo Bonzini43771532013-09-09 17:58:40 +02001692static void reclaim_ramblock(RAMBlock *block)
1693{
1694 if (block->flags & RAM_PREALLOC) {
1695 ;
1696 } else if (xen_enabled()) {
1697 xen_invalidate_map_cache_entry(block->host);
1698#ifndef _WIN32
1699 } else if (block->fd >= 0) {
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001700 if (block->flags & RAM_FILE) {
1701 qemu_ram_munmap(block->host, block->max_length);
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001702 } else {
1703 munmap(block->host, block->max_length);
1704 }
Paolo Bonzini43771532013-09-09 17:58:40 +02001705 close(block->fd);
1706#endif
1707 } else {
1708 qemu_anon_ram_free(block->host, block->max_length);
1709 }
1710 g_free(block);
1711}
1712
Anthony Liguoric227f092009-10-01 16:12:16 -05001713void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001714{
Alex Williamson04b16652010-07-02 11:13:17 -06001715 RAMBlock *block;
1716
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001717 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001718 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001719 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001720 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001721 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001722 /* Write list before version */
1723 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001724 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001725 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001726 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001727 }
1728 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001729 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001730}
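/* Added note (not in the original file): qemu_ram_free() unlinks the block
 * under the ramlist mutex but defers reclaim_ramblock() through call_rcu(),
 * so a reader that found the block inside an RCU critical section can keep
 * using block->host until it leaves that section.
 */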
1731
Huang Yingcd19cfa2011-03-02 08:56:19 +01001732#ifndef _WIN32
1733void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1734{
1735 RAMBlock *block;
1736 ram_addr_t offset;
1737 int flags;
1738 void *area, *vaddr;
1739
Mike Day0dc3f442013-09-05 14:41:35 -04001740 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001741 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001742 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001743 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001744 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001745 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001746 } else if (xen_enabled()) {
1747 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001748 } else {
1749 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001750 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001751 flags |= (block->flags & RAM_SHARED ?
1752 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001753 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1754 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001755 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001756 /*
1757 * Remap needs to match alloc. Accelerators that
1758 * set phys_mem_alloc never remap. If they did,
1759 * we'd need a remap hook here.
1760 */
1761 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1762
Huang Yingcd19cfa2011-03-02 08:56:19 +01001763 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1764 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1765 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001766 }
1767 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001768 fprintf(stderr, "Could not remap addr: "
1769 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001770 length, addr);
1771 exit(1);
1772 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001773 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001774 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001775 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001776 }
1777 }
1778}
1779#endif /* !_WIN32 */
1780
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001781int qemu_get_ram_fd(ram_addr_t addr)
1782{
Mike Dayae3a7042013-09-05 14:41:35 -04001783 RAMBlock *block;
1784 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001785
Mike Day0dc3f442013-09-05 14:41:35 -04001786 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001787 block = qemu_get_ram_block(addr);
1788 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001789 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001790 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001791}
1792
Damjan Marion3fd74b82014-06-26 23:01:32 +02001793void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1794{
Mike Dayae3a7042013-09-05 14:41:35 -04001795 RAMBlock *block;
1796 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001797
Mike Day0dc3f442013-09-05 14:41:35 -04001798 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001799 block = qemu_get_ram_block(addr);
1800 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001801 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001802 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001803}
1804
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001805/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001806 * This should not be used for general purpose DMA. Use address_space_map
1807 * or address_space_rw instead. For local memory (e.g. video ram) that the
1808 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001809 *
1810 * By the time this function returns, the returned pointer is not protected
1811 * by RCU anymore. If the caller is not within an RCU critical section and
1812 * does not hold the iothread lock, it must have other means of protecting the
1813 * pointer, such as a reference to the region that includes the incoming
1814 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001815 */
1816void *qemu_get_ram_ptr(ram_addr_t addr)
1817{
Mike Dayae3a7042013-09-05 14:41:35 -04001818 RAMBlock *block;
1819 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001820
Mike Day0dc3f442013-09-05 14:41:35 -04001821 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001822 block = qemu_get_ram_block(addr);
1823
1824 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001825 /* We need to check if the requested address is in the RAM
1826 * because we don't want to map the entire memory in QEMU.
1827 * In that case just map until the end of the page.
1828 */
1829 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001830 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001831 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001832 }
Mike Dayae3a7042013-09-05 14:41:35 -04001833
1834 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001835 }
Mike Dayae3a7042013-09-05 14:41:35 -04001836 ptr = ramblock_ptr(block, addr - block->offset);
1837
Mike Day0dc3f442013-09-05 14:41:35 -04001838unlock:
1839 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001840 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001841}
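/* Illustrative sketch (not in the original file): per the comment above, a
 * caller without other protection should wrap the lookup and the access in
 * its own RCU critical section, roughly:
 *
 *     rcu_read_lock();
 *     void *host = qemu_get_ram_ptr(ram_addr);
 *     memcpy(host, buf, len);
 *     rcu_read_unlock();
 *
 * ram_addr, buf and len are hypothetical; holding a reference on the owning
 * MemoryRegion is the other way to keep the pointer valid.
 */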
1842
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001843/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001844 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001845 *
1846 * By the time this function returns, the returned pointer is not protected
1847 * by RCU anymore. If the caller is not within an RCU critical section and
1848 * does not hold the iothread lock, it must have other means of protecting the
1849 * pointer, such as a reference to the region that includes the incoming
1850 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001851 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001852static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001853{
Mike Dayae3a7042013-09-05 14:41:35 -04001854 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001855 if (*size == 0) {
1856 return NULL;
1857 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001858 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001859 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001860 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001861 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001862 rcu_read_lock();
1863 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001864 if (addr - block->offset < block->max_length) {
1865 if (addr - block->offset + *size > block->max_length) {
1866 *size = block->max_length - addr + block->offset;
            }
Mike Dayae3a7042013-09-05 14:41:35 -04001867 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001868 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001869 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001870 }
1871 }
1872
1873 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1874 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001875 }
1876}
1877
Paolo Bonzini7443b432013-06-03 12:44:02 +02001878/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001879 * (typically a TLB entry) back to a ram offset.
1880 *
1881 * By the time this function returns, the returned pointer is not protected
1882 * by RCU anymore. If the caller is not within an RCU critical section and
1883 * does not hold the iothread lock, it must have other means of protecting the
1884 * pointer, such as a reference to the region that includes the incoming
1885 * ram_addr_t.
1886 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001887MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001888{
pbrook94a6b542009-04-11 17:15:54 +00001889 RAMBlock *block;
1890 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001891 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001892
Jan Kiszka868bb332011-06-21 22:59:09 +02001893 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001894 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001895 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001896 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001897 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001898 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001899 }
1900
Mike Day0dc3f442013-09-05 14:41:35 -04001901 rcu_read_lock();
1902 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001903 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001904 goto found;
1905 }
1906
Mike Day0dc3f442013-09-05 14:41:35 -04001907 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001908 /* This case occurs when the block is not mapped. */
1909 if (block->host == NULL) {
1910 continue;
1911 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001912 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001913 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001914 }
pbrook94a6b542009-04-11 17:15:54 +00001915 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001916
Mike Day0dc3f442013-09-05 14:41:35 -04001917 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001918 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001919
1920found:
1921 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001922 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001923 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001924 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001925}
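/* Illustrative sketch (not in the original file): softmmu helpers use this
 * reverse lookup to turn a host pointer (typically from a TLB entry) back
 * into a ram_addr_t, roughly:
 *
 *     ram_addr_t ram_addr;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(host_ptr, &ram_addr);
 *     if (mr == NULL) {
 *         ... host_ptr does not point into guest RAM ...
 *     }
 *
 * host_ptr is a hypothetical host pointer.
 */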
Alex Williamsonf471a172010-06-11 11:11:42 -06001926
Avi Kivitya8170e52012-10-23 12:30:10 +02001927static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001928 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001929{
Juan Quintela52159192013-10-08 12:44:04 +02001930 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001931 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001932 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001933 switch (size) {
1934 case 1:
1935 stb_p(qemu_get_ram_ptr(ram_addr), val);
1936 break;
1937 case 2:
1938 stw_p(qemu_get_ram_ptr(ram_addr), val);
1939 break;
1940 case 4:
1941 stl_p(qemu_get_ram_ptr(ram_addr), val);
1942 break;
1943 default:
1944 abort();
1945 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001946 /* Set both VGA and migration bits for simplicity and to remove
1947 * the notdirty callback faster.
1948 */
1949 cpu_physical_memory_set_dirty_range(ram_addr, size,
1950 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001951 /* we remove the notdirty callback only if the code has been
1952 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001953 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001954 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001955 }
bellard1ccde1c2004-02-06 19:46:14 +00001956}
1957
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001958static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1959 unsigned size, bool is_write)
1960{
1961 return is_write;
1962}
1963
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001964static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001965 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001966 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001967 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001968};
1969
pbrook0f459d12008-06-09 00:20:13 +00001970/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001971static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001972{
Andreas Färber93afead2013-08-26 03:41:01 +02001973 CPUState *cpu = current_cpu;
1974 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001975 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001976 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001977 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001978 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001979
Andreas Färberff4700b2013-08-26 18:23:18 +02001980 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001981 /* We re-entered the check after replacing the TB. Now raise
1982 * the debug interrupt so that it will trigger after the
1983 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001984 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001985 return;
1986 }
Andreas Färber93afead2013-08-26 03:41:01 +02001987 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001988 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001989 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1990 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001991 if (flags == BP_MEM_READ) {
1992 wp->flags |= BP_WATCHPOINT_HIT_READ;
1993 } else {
1994 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1995 }
1996 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001997 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001998 if (!cpu->watchpoint_hit) {
1999 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002000 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002001 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002002 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002003 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002004 } else {
2005 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002006 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002007 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002008 }
aliguori06d55cc2008-11-18 20:24:06 +00002009 }
aliguori6e140f22008-11-18 20:37:55 +00002010 } else {
2011 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002012 }
2013 }
2014}
2015
pbrook6658ffb2007-03-16 23:58:11 +00002016/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2017 so these check for a hit then pass through to the normal out-of-line
2018 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002019static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2020 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002021{
Peter Maydell66b9b432015-04-26 16:49:24 +01002022 MemTxResult res;
2023 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002024
Peter Maydell66b9b432015-04-26 16:49:24 +01002025 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002026 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002027 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002028 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002029 break;
2030 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002031 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002032 break;
2033 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002034 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002035 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002036 default: abort();
2037 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002038 *pdata = data;
2039 return res;
2040}
2041
2042static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2043 uint64_t val, unsigned size,
2044 MemTxAttrs attrs)
2045{
2046 MemTxResult res;
2047
2048 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2049 switch (size) {
2050 case 1:
2051 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2052 break;
2053 case 2:
2054 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2055 break;
2056 case 4:
2057 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2058 break;
2059 default: abort();
2060 }
2061 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002062}
2063
Avi Kivity1ec9b902012-01-02 12:47:48 +02002064static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002065 .read_with_attrs = watch_mem_read,
2066 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002067 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002068};
pbrook6658ffb2007-03-16 23:58:11 +00002069
Peter Maydellf25a49e2015-04-26 16:49:24 +01002070static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2071 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002072{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002073 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002074 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002075 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002076
blueswir1db7b5422007-05-26 17:36:03 +00002077#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002078 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002079 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002080#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002081 res = address_space_read(subpage->as, addr + subpage->base,
2082 attrs, buf, len);
2083 if (res) {
2084 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002085 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002086 switch (len) {
2087 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002088 *data = ldub_p(buf);
2089 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002090 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002091 *data = lduw_p(buf);
2092 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002093 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002094 *data = ldl_p(buf);
2095 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002096 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002097 *data = ldq_p(buf);
2098 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002099 default:
2100 abort();
2101 }
blueswir1db7b5422007-05-26 17:36:03 +00002102}
2103
Peter Maydellf25a49e2015-04-26 16:49:24 +01002104static MemTxResult subpage_write(void *opaque, hwaddr addr,
2105 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002106{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002107 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002108 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002109
blueswir1db7b5422007-05-26 17:36:03 +00002110#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002111 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002112 " value %"PRIx64"\n",
2113 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002114#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002115 switch (len) {
2116 case 1:
2117 stb_p(buf, value);
2118 break;
2119 case 2:
2120 stw_p(buf, value);
2121 break;
2122 case 4:
2123 stl_p(buf, value);
2124 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002125 case 8:
2126 stq_p(buf, value);
2127 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002128 default:
2129 abort();
2130 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002131 return address_space_write(subpage->as, addr + subpage->base,
2132 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002133}
2134
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002135static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002136 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002137{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002138 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002139#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002140 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002141 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002142#endif
2143
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002144 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002145 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002146}
2147
Avi Kivity70c68e42012-01-02 12:32:48 +02002148static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002149 .read_with_attrs = subpage_read,
2150 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002151 .impl.min_access_size = 1,
2152 .impl.max_access_size = 8,
2153 .valid.min_access_size = 1,
2154 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002155 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002156 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002157};
2158
Anthony Liguoric227f092009-10-01 16:12:16 -05002159static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002160 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002161{
2162 int idx, eidx;
2163
2164 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2165 return -1;
2166 idx = SUBPAGE_IDX(start);
2167 eidx = SUBPAGE_IDX(end);
2168#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002169 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2170 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002171#endif
blueswir1db7b5422007-05-26 17:36:03 +00002172 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002173 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002174 }
2175
2176 return 0;
2177}
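/* Added note (not in the original file): subpage_register() fills the
 * per-page dispatch table at byte granularity (assuming SUBPAGE_IDX() masks
 * with ~TARGET_PAGE_MASK, as defined earlier in this file).  For example,
 * registering section 5 for [0x100, 0x1ff] sets sub_section[0x100..0x1ff]
 * to 5, so later accesses in that byte range dispatch to that section.
 */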
2178
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002179static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002180{
Anthony Liguoric227f092009-10-01 16:12:16 -05002181 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002182
Anthony Liguori7267c092011-08-20 22:09:37 -05002183 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002184
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002185 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002186 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002187 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002188 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002189 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002190#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002191 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2192 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002193#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002194 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002195
2196 return mmio;
2197}
2198
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002199static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2200 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002201{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002202 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002203 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002204 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002205 .mr = mr,
2206 .offset_within_address_space = 0,
2207 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002208 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002209 };
2210
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002211 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002212}
2213
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002214MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002215{
Peter Maydell32857f42015-10-01 15:29:50 +01002216 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2217 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002218 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002219
2220 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002221}
2222
Avi Kivitye9179ce2009-06-14 11:38:52 +03002223static void io_mem_init(void)
2224{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002225 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002226 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002227 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002228 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002229 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002230 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002231 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002232}
2233
Avi Kivityac1970f2012-10-03 16:22:53 +02002234static void mem_begin(MemoryListener *listener)
2235{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002236 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002237 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2238 uint16_t n;
2239
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002240 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002241 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002242 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002243 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002244 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002245 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002246 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002247 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002248
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002249 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002250 d->as = as;
2251 as->next_dispatch = d;
2252}
2253
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002254static void address_space_dispatch_free(AddressSpaceDispatch *d)
2255{
2256 phys_sections_free(&d->map);
2257 g_free(d);
2258}
2259
Paolo Bonzini00752702013-05-29 12:13:54 +02002260static void mem_commit(MemoryListener *listener)
2261{
2262 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002263 AddressSpaceDispatch *cur = as->dispatch;
2264 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002265
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002266 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002267
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002268 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002269 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002270 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002271 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002272}
2273
Avi Kivity1d711482012-10-02 18:54:45 +02002274static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002275{
Peter Maydell32857f42015-10-01 15:29:50 +01002276 CPUAddressSpace *cpuas;
2277 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002278
2279    /* Since each CPU stores RAM addresses in its TLB cache, we must
2280       reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002281 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2282 cpu_reloading_memory_map();
2283 /* The CPU and TLB are protected by the iothread lock.
2284 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2285 * may have split the RCU critical section.
2286 */
2287 d = atomic_rcu_read(&cpuas->as->dispatch);
2288 cpuas->memory_dispatch = d;
2289 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002290}
2291
Avi Kivityac1970f2012-10-03 16:22:53 +02002292void address_space_init_dispatch(AddressSpace *as)
2293{
Paolo Bonzini00752702013-05-29 12:13:54 +02002294 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002295 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002296 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002297 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002298 .region_add = mem_add,
2299 .region_nop = mem_add,
2300 .priority = 0,
2301 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002302 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002303}
2304
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002305void address_space_unregister(AddressSpace *as)
2306{
2307 memory_listener_unregister(&as->dispatch_listener);
2308}
2309
Avi Kivity83f3c252012-10-07 12:59:55 +02002310void address_space_destroy_dispatch(AddressSpace *as)
2311{
2312 AddressSpaceDispatch *d = as->dispatch;
2313
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002314 atomic_rcu_set(&as->dispatch, NULL);
2315 if (d) {
2316 call_rcu(d, address_space_dispatch_free, rcu);
2317 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002318}
2319
Avi Kivity62152b82011-07-26 14:26:14 +03002320static void memory_map_init(void)
2321{
Anthony Liguori7267c092011-08-20 22:09:37 -05002322 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002323
Paolo Bonzini57271d62013-11-07 17:14:37 +01002324 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002325 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002326
Anthony Liguori7267c092011-08-20 22:09:37 -05002327 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002328 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2329 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002330 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002331}
2332
2333MemoryRegion *get_system_memory(void)
2334{
2335 return system_memory;
2336}
2337
Avi Kivity309cb472011-08-08 16:09:03 +03002338MemoryRegion *get_system_io(void)
2339{
2340 return system_io;
2341}
2342
pbrooke2eef172008-06-08 01:09:01 +00002343#endif /* !defined(CONFIG_USER_ONLY) */
2344
bellard13eb76e2004-01-24 15:23:36 +00002345/* physical memory access (slow version, mainly for debug) */
2346#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002347int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002348 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002349{
2350 int l, flags;
2351 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002352 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002353
2354 while (len > 0) {
2355 page = addr & TARGET_PAGE_MASK;
2356 l = (page + TARGET_PAGE_SIZE) - addr;
2357 if (l > len)
2358 l = len;
2359 flags = page_get_flags(page);
2360 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002361 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002362 if (is_write) {
2363 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002364 return -1;
bellard579a97f2007-11-11 14:26:47 +00002365 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002366 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002367 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002368 memcpy(p, buf, l);
2369 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002370 } else {
2371 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002372 return -1;
bellard579a97f2007-11-11 14:26:47 +00002373 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002374 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002375 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002376 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002377 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002378 }
2379 len -= l;
2380 buf += l;
2381 addr += l;
2382 }
Paul Brooka68fe892010-03-01 00:08:59 +00002383 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002384}
bellard8df1cd02005-01-28 22:37:22 +00002385
bellard13eb76e2004-01-24 15:23:36 +00002386#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002387
Paolo Bonzini845b6212015-03-23 11:45:53 +01002388static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002389 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002390{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002391 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2392 /* No early return if dirty_log_mask is or becomes 0, because
2393 * cpu_physical_memory_set_dirty_range will still call
2394 * xen_modified_memory.
2395 */
2396 if (dirty_log_mask) {
2397 dirty_log_mask =
2398 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002399 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002400 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2401 tb_invalidate_phys_range(addr, addr + length);
2402 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2403 }
2404 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002405}
2406
Richard Henderson23326162013-07-08 14:55:59 -07002407static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002408{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002409 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002410
2411 /* Regions are assumed to support 1-4 byte accesses unless
2412 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002413 if (access_size_max == 0) {
2414 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002415 }
Richard Henderson23326162013-07-08 14:55:59 -07002416
2417 /* Bound the maximum access by the alignment of the address. */
2418 if (!mr->ops->impl.unaligned) {
2419 unsigned align_size_max = addr & -addr;
2420 if (align_size_max != 0 && align_size_max < access_size_max) {
2421 access_size_max = align_size_max;
2422 }
2423 }
2424
2425 /* Don't attempt accesses larger than the maximum. */
2426 if (l > access_size_max) {
2427 l = access_size_max;
2428 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002429 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002430
2431 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002432}
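/* Illustrative worked example (added note, not upstream): with
 * valid.max_access_size == 8, impl.unaligned == false and a request of
 * l == 8 bytes at addr == 0x1002, the alignment term (addr & -addr) == 2
 * clamps access_size_max to 2, and pow2floor() leaves l == 2, so the
 * caller in address_space_rw() splits the access into smaller ones.
 */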
2433
Jan Kiszka4840f102015-06-18 18:47:22 +02002434static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002435{
Jan Kiszka4840f102015-06-18 18:47:22 +02002436 bool unlocked = !qemu_mutex_iothread_locked();
2437 bool release_lock = false;
2438
2439 if (unlocked && mr->global_locking) {
2440 qemu_mutex_lock_iothread();
2441 unlocked = false;
2442 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002443 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002444 if (mr->flush_coalesced_mmio) {
2445 if (unlocked) {
2446 qemu_mutex_lock_iothread();
2447 }
2448 qemu_flush_coalesced_mmio_buffer();
2449 if (unlocked) {
2450 qemu_mutex_unlock_iothread();
2451 }
2452 }
2453
2454 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002455}
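/* Illustrative caller sketch (not upstream code): prepare_mmio_access()
 * reports whether this caller took the iothread lock, so the contract is
 * to OR the result into a flag and unlock once at the end of the access,
 * exactly as the dispatch loops below do.  The helper name is invented.
 */
static void example_mmio_store32(MemoryRegion *mr, hwaddr addr, uint64_t val,
                                 MemTxAttrs attrs)
{
    bool release_lock = prepare_mmio_access(mr);

    memory_region_dispatch_write(mr, addr, val, 4, attrs);
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
}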
2456
Peter Maydell5c9eb022015-04-26 16:49:24 +01002457MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2458 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002459{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002460 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002461 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002462 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002463 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002464 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002465 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002466 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002467
Paolo Bonzini41063e12015-03-18 14:21:43 +01002468 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002469 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002470 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002471 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002472
bellard13eb76e2004-01-24 15:23:36 +00002473 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002474 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002475 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002476 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002477 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002478 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002479 switch (l) {
2480 case 8:
2481 /* 64 bit write access */
2482 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002483 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2484 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002485 break;
2486 case 4:
bellard1c213d12005-09-03 10:49:04 +00002487 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002488 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002489 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2490 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002491 break;
2492 case 2:
bellard1c213d12005-09-03 10:49:04 +00002493 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002494 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002495 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2496 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002497 break;
2498 case 1:
bellard1c213d12005-09-03 10:49:04 +00002499 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002500 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002501 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2502 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002503 break;
2504 default:
2505 abort();
bellard13eb76e2004-01-24 15:23:36 +00002506 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002507 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002508 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002509 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002510 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002511 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002512 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002513 }
2514 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002515 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002516 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002517 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002518 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002519 switch (l) {
2520 case 8:
2521 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002522 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2523 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002524 stq_p(buf, val);
2525 break;
2526 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002527 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002528 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2529 attrs);
bellardc27004e2005-01-03 23:35:10 +00002530 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002531 break;
2532 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002533 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002534 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2535 attrs);
bellardc27004e2005-01-03 23:35:10 +00002536 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002537 break;
2538 case 1:
bellard1c213d12005-09-03 10:49:04 +00002539 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002540 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2541 attrs);
bellardc27004e2005-01-03 23:35:10 +00002542 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002543 break;
2544 default:
2545 abort();
bellard13eb76e2004-01-24 15:23:36 +00002546 }
2547 } else {
2548 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002549 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002550 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002551 }
2552 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002553
2554 if (release_lock) {
2555 qemu_mutex_unlock_iothread();
2556 release_lock = false;
2557 }
2558
bellard13eb76e2004-01-24 15:23:36 +00002559 len -= l;
2560 buf += l;
2561 addr += l;
2562 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002563 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002564
Peter Maydell3b643492015-04-26 16:49:23 +01002565 return result;
bellard13eb76e2004-01-24 15:23:36 +00002566}
bellard8df1cd02005-01-28 22:37:22 +00002567
Peter Maydell5c9eb022015-04-26 16:49:24 +01002568MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2569 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002570{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002571 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002572}
2573
Peter Maydell5c9eb022015-04-26 16:49:24 +01002574MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2575 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002576{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002577 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002578}
2579
2580
Avi Kivitya8170e52012-10-23 12:30:10 +02002581void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002582 int len, int is_write)
2583{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002584 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2585 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002586}
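/* Illustrative sketch (not upstream code): callers that care whether the
 * access actually reached something can use the MemTxResult-returning
 * entry points instead of the void cpu_physical_memory_rw() wrapper.
 * The helper name is invented for the example.
 */
static MemTxResult example_dma_write(AddressSpace *as, hwaddr addr,
                                     const uint8_t *buf, int len)
{
    MemTxResult res = address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                                          buf, len);
    if (res != MEMTX_OK) {
        /* e.g. part of the range decoded to an unassigned region */
    }
    return res;
}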
2587
Alexander Graf582b55a2013-12-11 14:17:44 +01002588enum write_rom_type {
2589 WRITE_DATA,
2590 FLUSH_CACHE,
2591};
2592
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002593static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002594 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002595{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002596 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002597 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002598 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002599 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002600
Paolo Bonzini41063e12015-03-18 14:21:43 +01002601 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002602 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002603 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002604 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002605
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002606 if (!(memory_region_is_ram(mr) ||
2607 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002608 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002609 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002610 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002611 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002612 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002613 switch (type) {
2614 case WRITE_DATA:
2615 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002616 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002617 break;
2618 case FLUSH_CACHE:
2619 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2620 break;
2621 }
bellardd0ecd2a2006-04-23 17:14:48 +00002622 }
2623 len -= l;
2624 buf += l;
2625 addr += l;
2626 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002627 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002628}
2629
Alexander Graf582b55a2013-12-11 14:17:44 +01002630/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002631void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002632 const uint8_t *buf, int len)
2633{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002634 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002635}
2636
2637void cpu_flush_icache_range(hwaddr start, int len)
2638{
2639 /*
2640 * This function should do the same thing as an icache flush that was
2641 * triggered from within the guest. For TCG we are always cache coherent,
2642 * so there is no need to flush anything. For KVM / Xen we need to flush
2643 * the host's instruction cache at least.
2644 */
2645 if (tcg_enabled()) {
2646 return;
2647 }
2648
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002649 cpu_physical_memory_write_rom_internal(&address_space_memory,
2650 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002651}
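/* Illustrative sketch (not upstream code): a firmware loader would combine
 * the two helpers above -- write the blob through the ROM path and then
 * flush the host icache in case the guest executes it under KVM/Xen.
 * The function name and parameters are invented.
 */
static void example_install_blob(AddressSpace *as, hwaddr base,
                                 const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(as, base, blob, size);
    cpu_flush_icache_range(base, size);
}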
2652
aliguori6d16c2f2009-01-22 16:59:11 +00002653typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002654 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002655 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002656 hwaddr addr;
2657 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002658 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002659} BounceBuffer;
2660
2661static BounceBuffer bounce;
2662
aliguoriba223c22009-01-22 16:59:16 +00002663typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002664 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002665 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002666} MapClient;
2667
Fam Zheng38e047b2015-03-16 17:03:35 +08002668QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002669static QLIST_HEAD(map_client_list, MapClient) map_client_list
2670 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002671
Fam Zhenge95205e2015-03-16 17:03:37 +08002672static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002673{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002674 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002675 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002676}
2677
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002678static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002679{
2680 MapClient *client;
2681
Blue Swirl72cf2d42009-09-12 07:36:22 +00002682 while (!QLIST_EMPTY(&map_client_list)) {
2683 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002684 qemu_bh_schedule(client->bh);
2685 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002686 }
2687}
2688
Fam Zhenge95205e2015-03-16 17:03:37 +08002689void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002690{
2691 MapClient *client = g_malloc(sizeof(*client));
2692
Fam Zheng38e047b2015-03-16 17:03:35 +08002693 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002694 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002695 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002696 if (!atomic_read(&bounce.in_use)) {
2697 cpu_notify_map_clients_locked();
2698 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002699 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002700}
2701
Fam Zheng38e047b2015-03-16 17:03:35 +08002702void cpu_exec_init_all(void)
2703{
2704 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002705 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002706 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002707 qemu_mutex_init(&map_client_list_lock);
2708}
2709
Fam Zhenge95205e2015-03-16 17:03:37 +08002710void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002711{
Fam Zhenge95205e2015-03-16 17:03:37 +08002712 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002713
Fam Zhenge95205e2015-03-16 17:03:37 +08002714 qemu_mutex_lock(&map_client_list_lock);
2715 QLIST_FOREACH(client, &map_client_list, link) {
2716 if (client->bh == bh) {
2717 cpu_unregister_map_client_do(client);
2718 break;
2719 }
2720 }
2721 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002722}
2723
2724static void cpu_notify_map_clients(void)
2725{
Fam Zheng38e047b2015-03-16 17:03:35 +08002726 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002727 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002728 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002729}
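/* Illustrative sketch (not upstream code): how a device model can retry a
 * failed address_space_map() once the single bounce buffer is released.
 * The struct, callback and field names are invented; s->bh is assumed to
 * have been created with qemu_bh_new(example_dma_run, s).
 */
typedef struct ExampleDMA {
    QEMUBH *bh;
    AddressSpace *as;
    hwaddr addr;
    hwaddr len;
} ExampleDMA;

static void example_dma_run(void *opaque)
{
    ExampleDMA *s = opaque;
    hwaddr plen = s->len;
    void *p = address_space_map(s->as, s->addr, &plen, true);

    if (!p) {
        /* Bounce buffer busy: run again from a bottom half after the
         * current user unmaps and cpu_notify_map_clients() fires. */
        cpu_register_map_client(s->bh);
        return;
    }
    /* ... produce up to plen bytes into p ... */
    address_space_unmap(s->as, p, plen, true, plen);
}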
2730
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002731bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2732{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002733 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002734 hwaddr l, xlat;
2735
Paolo Bonzini41063e12015-03-18 14:21:43 +01002736 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002737 while (len > 0) {
2738 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002739 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2740 if (!memory_access_is_direct(mr, is_write)) {
2741 l = memory_access_size(mr, l, addr);
2742 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002743 return false;
2744 }
2745 }
2746
2747 len -= l;
2748 addr += l;
2749 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002750 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002751 return true;
2752}
2753
aliguori6d16c2f2009-01-22 16:59:11 +00002754/* Map a physical memory region into a host virtual address.
2755 * May map a subset of the requested range, given by and returned in *plen.
2756 * May return NULL if resources needed to perform the mapping are exhausted.
2757 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002758 * Use cpu_register_map_client() to know when retrying the map operation is
2759 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002760 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002761void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002762 hwaddr addr,
2763 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002764 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002765{
Avi Kivitya8170e52012-10-23 12:30:10 +02002766 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002767 hwaddr done = 0;
2768 hwaddr l, xlat, base;
2769 MemoryRegion *mr, *this_mr;
2770 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002771
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002772 if (len == 0) {
2773 return NULL;
2774 }
aliguori6d16c2f2009-01-22 16:59:11 +00002775
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002776 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002777 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002778 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002779
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002780 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002781 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002782 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002783 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002784 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002785 /* Avoid unbounded allocations */
2786 l = MIN(l, TARGET_PAGE_SIZE);
2787 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002788 bounce.addr = addr;
2789 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002790
2791 memory_region_ref(mr);
2792 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002793 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002794 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2795 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002796 }
aliguori6d16c2f2009-01-22 16:59:11 +00002797
Paolo Bonzini41063e12015-03-18 14:21:43 +01002798 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002799 *plen = l;
2800 return bounce.buffer;
2801 }
2802
2803 base = xlat;
2804 raddr = memory_region_get_ram_addr(mr);
2805
2806 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002807 len -= l;
2808 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002809 done += l;
2810 if (len == 0) {
2811 break;
2812 }
2813
2814 l = len;
2815 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2816 if (this_mr != mr || xlat != base + done) {
2817 break;
2818 }
aliguori6d16c2f2009-01-22 16:59:11 +00002819 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002820
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002821 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002822 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002823 *plen = done;
2824 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002825}
2826
Avi Kivityac1970f2012-10-03 16:22:53 +02002827/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002828 * Will also mark the memory as dirty if is_write == 1. access_len gives
2829 * the amount of memory that was actually read or written by the caller.
2830 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002831void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2832 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002833{
2834 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002835 MemoryRegion *mr;
2836 ram_addr_t addr1;
2837
2838 mr = qemu_ram_addr_from_host(buffer, &addr1);
2839 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002840 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002841 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002842 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002843 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002844 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002845 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002846 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002847 return;
2848 }
2849 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002850 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2851 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002852 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002853 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002854 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002855 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002856 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002857 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002858}
bellardd0ecd2a2006-04-23 17:14:48 +00002859
Avi Kivitya8170e52012-10-23 12:30:10 +02002860void *cpu_physical_memory_map(hwaddr addr,
2861 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002862 int is_write)
2863{
2864 return address_space_map(&address_space_memory, addr, plen, is_write);
2865}
2866
Avi Kivitya8170e52012-10-23 12:30:10 +02002867void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2868 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002869{
2870 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2871}
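/* Illustrative sketch (not upstream code): the usual zero-copy pattern on
 * top of the map/unmap API.  *plen may come back smaller than requested,
 * so the loop keeps going, and the access_len passed to unmap reflects
 * what was actually written.  Function name and fill pattern are invented.
 */
static void example_fill_guest_ram(hwaddr addr, hwaddr len, uint8_t pattern)
{
    while (len > 0) {
        hwaddr plen = len;
        void *p = cpu_physical_memory_map(addr, &plen, 1);

        if (!p) {
            break;      /* MMIO or bounce-buffer contention: give up here */
        }
        memset(p, pattern, plen);
        cpu_physical_memory_unmap(p, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}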
2872
bellard8df1cd02005-01-28 22:37:22 +00002873/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002874static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2875 MemTxAttrs attrs,
2876 MemTxResult *result,
2877 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002878{
bellard8df1cd02005-01-28 22:37:22 +00002879 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002880 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002881 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002882 hwaddr l = 4;
2883 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002884 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002885 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002886
Paolo Bonzini41063e12015-03-18 14:21:43 +01002887 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002888 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002889 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002890 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002891
bellard8df1cd02005-01-28 22:37:22 +00002892 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002893 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002894#if defined(TARGET_WORDS_BIGENDIAN)
2895 if (endian == DEVICE_LITTLE_ENDIAN) {
2896 val = bswap32(val);
2897 }
2898#else
2899 if (endian == DEVICE_BIG_ENDIAN) {
2900 val = bswap32(val);
2901 }
2902#endif
bellard8df1cd02005-01-28 22:37:22 +00002903 } else {
2904 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002905 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002906 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002907 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002908 switch (endian) {
2909 case DEVICE_LITTLE_ENDIAN:
2910 val = ldl_le_p(ptr);
2911 break;
2912 case DEVICE_BIG_ENDIAN:
2913 val = ldl_be_p(ptr);
2914 break;
2915 default:
2916 val = ldl_p(ptr);
2917 break;
2918 }
Peter Maydell50013112015-04-26 16:49:24 +01002919 r = MEMTX_OK;
2920 }
2921 if (result) {
2922 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002923 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002924 if (release_lock) {
2925 qemu_mutex_unlock_iothread();
2926 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002927 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002928 return val;
2929}
2930
Peter Maydell50013112015-04-26 16:49:24 +01002931uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2932 MemTxAttrs attrs, MemTxResult *result)
2933{
2934 return address_space_ldl_internal(as, addr, attrs, result,
2935 DEVICE_NATIVE_ENDIAN);
2936}
2937
2938uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2939 MemTxAttrs attrs, MemTxResult *result)
2940{
2941 return address_space_ldl_internal(as, addr, attrs, result,
2942 DEVICE_LITTLE_ENDIAN);
2943}
2944
2945uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2946 MemTxAttrs attrs, MemTxResult *result)
2947{
2948 return address_space_ldl_internal(as, addr, attrs, result,
2949 DEVICE_BIG_ENDIAN);
2950}
2951
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002952uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002953{
Peter Maydell50013112015-04-26 16:49:24 +01002954 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002955}
2956
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002957uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002958{
Peter Maydell50013112015-04-26 16:49:24 +01002959 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002960}
2961
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002962uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002963{
Peter Maydell50013112015-04-26 16:49:24 +01002964 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002965}
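/* Illustrative sketch (not upstream code): device code that wants the bus
 * error reported picks the address_space_* flavour and passes a result
 * pointer; the fire-and-forget ld*_phys wrappers above discard it.  The
 * helper name is invented.
 */
static uint32_t example_read_le_word(AddressSpace *as, hwaddr addr,
                                     MemTxResult *result)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, result);
}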
2966
bellard84b7b8e2005-11-28 21:19:04 +00002967/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002968static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2969 MemTxAttrs attrs,
2970 MemTxResult *result,
2971 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002972{
bellard84b7b8e2005-11-28 21:19:04 +00002973 uint8_t *ptr;
2974 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002975 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002976 hwaddr l = 8;
2977 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002978 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002979 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002980
Paolo Bonzini41063e12015-03-18 14:21:43 +01002981 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002982 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002983 false);
2984 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002985 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002986
bellard84b7b8e2005-11-28 21:19:04 +00002987 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002988 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002989#if defined(TARGET_WORDS_BIGENDIAN)
2990 if (endian == DEVICE_LITTLE_ENDIAN) {
2991 val = bswap64(val);
2992 }
2993#else
2994 if (endian == DEVICE_BIG_ENDIAN) {
2995 val = bswap64(val);
2996 }
2997#endif
bellard84b7b8e2005-11-28 21:19:04 +00002998 } else {
2999 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003000 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003001 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003002 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003003 switch (endian) {
3004 case DEVICE_LITTLE_ENDIAN:
3005 val = ldq_le_p(ptr);
3006 break;
3007 case DEVICE_BIG_ENDIAN:
3008 val = ldq_be_p(ptr);
3009 break;
3010 default:
3011 val = ldq_p(ptr);
3012 break;
3013 }
Peter Maydell50013112015-04-26 16:49:24 +01003014 r = MEMTX_OK;
3015 }
3016 if (result) {
3017 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003018 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003019 if (release_lock) {
3020 qemu_mutex_unlock_iothread();
3021 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003022 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003023 return val;
3024}
3025
Peter Maydell50013112015-04-26 16:49:24 +01003026uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3027 MemTxAttrs attrs, MemTxResult *result)
3028{
3029 return address_space_ldq_internal(as, addr, attrs, result,
3030 DEVICE_NATIVE_ENDIAN);
3031}
3032
3033uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3034 MemTxAttrs attrs, MemTxResult *result)
3035{
3036 return address_space_ldq_internal(as, addr, attrs, result,
3037 DEVICE_LITTLE_ENDIAN);
3038}
3039
3040uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3041 MemTxAttrs attrs, MemTxResult *result)
3042{
3043 return address_space_ldq_internal(as, addr, attrs, result,
3044 DEVICE_BIG_ENDIAN);
3045}
3046
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003047uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003048{
Peter Maydell50013112015-04-26 16:49:24 +01003049 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003050}
3051
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003052uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003053{
Peter Maydell50013112015-04-26 16:49:24 +01003054 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003055}
3056
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003057uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003058{
Peter Maydell50013112015-04-26 16:49:24 +01003059 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003060}
3061
bellardaab33092005-10-30 20:48:42 +00003062/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003063uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3064 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003065{
3066 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003067 MemTxResult r;
3068
3069 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3070 if (result) {
3071 *result = r;
3072 }
bellardaab33092005-10-30 20:48:42 +00003073 return val;
3074}
3075
Peter Maydell50013112015-04-26 16:49:24 +01003076uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3077{
3078 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3079}
3080
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003081/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003082static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3083 hwaddr addr,
3084 MemTxAttrs attrs,
3085 MemTxResult *result,
3086 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003087{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003088 uint8_t *ptr;
3089 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003090 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003091 hwaddr l = 2;
3092 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003093 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003094 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003095
Paolo Bonzini41063e12015-03-18 14:21:43 +01003096 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003097 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003098 false);
3099 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003100 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003101
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003102 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003103 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003104#if defined(TARGET_WORDS_BIGENDIAN)
3105 if (endian == DEVICE_LITTLE_ENDIAN) {
3106 val = bswap16(val);
3107 }
3108#else
3109 if (endian == DEVICE_BIG_ENDIAN) {
3110 val = bswap16(val);
3111 }
3112#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003113 } else {
3114 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003115 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003116 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003117 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003118 switch (endian) {
3119 case DEVICE_LITTLE_ENDIAN:
3120 val = lduw_le_p(ptr);
3121 break;
3122 case DEVICE_BIG_ENDIAN:
3123 val = lduw_be_p(ptr);
3124 break;
3125 default:
3126 val = lduw_p(ptr);
3127 break;
3128 }
Peter Maydell50013112015-04-26 16:49:24 +01003129 r = MEMTX_OK;
3130 }
3131 if (result) {
3132 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003133 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003134 if (release_lock) {
3135 qemu_mutex_unlock_iothread();
3136 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003137 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003138 return val;
bellardaab33092005-10-30 20:48:42 +00003139}
3140
Peter Maydell50013112015-04-26 16:49:24 +01003141uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3142 MemTxAttrs attrs, MemTxResult *result)
3143{
3144 return address_space_lduw_internal(as, addr, attrs, result,
3145 DEVICE_NATIVE_ENDIAN);
3146}
3147
3148uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3149 MemTxAttrs attrs, MemTxResult *result)
3150{
3151 return address_space_lduw_internal(as, addr, attrs, result,
3152 DEVICE_LITTLE_ENDIAN);
3153}
3154
3155uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3156 MemTxAttrs attrs, MemTxResult *result)
3157{
3158 return address_space_lduw_internal(as, addr, attrs, result,
3159 DEVICE_BIG_ENDIAN);
3160}
3161
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003162uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003163{
Peter Maydell50013112015-04-26 16:49:24 +01003164 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003165}
3166
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003167uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003168{
Peter Maydell50013112015-04-26 16:49:24 +01003169 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003170}
3171
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003172uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003173{
Peter Maydell50013112015-04-26 16:49:24 +01003174 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003175}
3176
bellard8df1cd02005-01-28 22:37:22 +00003177/* warning: addr must be aligned. The ram page is not marked as dirty
3178 and the code inside is not invalidated. It is useful if the dirty
3179 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003180void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3181 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003182{
bellard8df1cd02005-01-28 22:37:22 +00003183 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003184 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003185 hwaddr l = 4;
3186 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003187 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003188 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003189 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003190
Paolo Bonzini41063e12015-03-18 14:21:43 +01003191 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003192 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003193 true);
3194 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003195 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003196
Peter Maydell50013112015-04-26 16:49:24 +01003197 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003198 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003199 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003200 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003201 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003202
Paolo Bonzini845b6212015-03-23 11:45:53 +01003203 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3204 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003205 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003206 r = MEMTX_OK;
3207 }
3208 if (result) {
3209 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003210 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003211 if (release_lock) {
3212 qemu_mutex_unlock_iothread();
3213 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003214 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003215}
3216
Peter Maydell50013112015-04-26 16:49:24 +01003217void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3218{
3219 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3220}
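/* Illustrative sketch (not upstream code): target MMU helpers use the
 * _notdirty variant when setting accessed/dirty bits in a guest PTE, so
 * the write does not invalidate TBs or mark the page for
 * self-modifying-code tracking.  The PTE layout and bit value are invented.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* hypothetical A bit */);
}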
3221
bellard8df1cd02005-01-28 22:37:22 +00003222/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003223static inline void address_space_stl_internal(AddressSpace *as,
3224 hwaddr addr, uint32_t val,
3225 MemTxAttrs attrs,
3226 MemTxResult *result,
3227 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003228{
bellard8df1cd02005-01-28 22:37:22 +00003229 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003230 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003231 hwaddr l = 4;
3232 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003233 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003234 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003235
Paolo Bonzini41063e12015-03-18 14:21:43 +01003236 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003237 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003238 true);
3239 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003240 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003241
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003242#if defined(TARGET_WORDS_BIGENDIAN)
3243 if (endian == DEVICE_LITTLE_ENDIAN) {
3244 val = bswap32(val);
3245 }
3246#else
3247 if (endian == DEVICE_BIG_ENDIAN) {
3248 val = bswap32(val);
3249 }
3250#endif
Peter Maydell50013112015-04-26 16:49:24 +01003251 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003252 } else {
bellard8df1cd02005-01-28 22:37:22 +00003253 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003254 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003255 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003256 switch (endian) {
3257 case DEVICE_LITTLE_ENDIAN:
3258 stl_le_p(ptr, val);
3259 break;
3260 case DEVICE_BIG_ENDIAN:
3261 stl_be_p(ptr, val);
3262 break;
3263 default:
3264 stl_p(ptr, val);
3265 break;
3266 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003267 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003268 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003269 }
Peter Maydell50013112015-04-26 16:49:24 +01003270 if (result) {
3271 *result = r;
3272 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003273 if (release_lock) {
3274 qemu_mutex_unlock_iothread();
3275 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003276 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003277}
3278
3279void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3280 MemTxAttrs attrs, MemTxResult *result)
3281{
3282 address_space_stl_internal(as, addr, val, attrs, result,
3283 DEVICE_NATIVE_ENDIAN);
3284}
3285
3286void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3287 MemTxAttrs attrs, MemTxResult *result)
3288{
3289 address_space_stl_internal(as, addr, val, attrs, result,
3290 DEVICE_LITTLE_ENDIAN);
3291}
3292
3293void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3294 MemTxAttrs attrs, MemTxResult *result)
3295{
3296 address_space_stl_internal(as, addr, val, attrs, result,
3297 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003298}
3299
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003300void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003301{
Peter Maydell50013112015-04-26 16:49:24 +01003302 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003303}
3304
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003305void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003306{
Peter Maydell50013112015-04-26 16:49:24 +01003307 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003308}
3309
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003310void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003311{
Peter Maydell50013112015-04-26 16:49:24 +01003312 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003313}
3314
bellardaab33092005-10-30 20:48:42 +00003315/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003316void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3317 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003318{
3319 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003320 MemTxResult r;
3321
3322 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3323 if (result) {
3324 *result = r;
3325 }
3326}
3327
3328void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3329{
3330 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003331}
3332
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003333/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003334static inline void address_space_stw_internal(AddressSpace *as,
3335 hwaddr addr, uint32_t val,
3336 MemTxAttrs attrs,
3337 MemTxResult *result,
3338 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003339{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003340 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003341 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003342 hwaddr l = 2;
3343 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003344 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003345 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003346
Paolo Bonzini41063e12015-03-18 14:21:43 +01003347 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003348 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003349 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003350 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003351
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003352#if defined(TARGET_WORDS_BIGENDIAN)
3353 if (endian == DEVICE_LITTLE_ENDIAN) {
3354 val = bswap16(val);
3355 }
3356#else
3357 if (endian == DEVICE_BIG_ENDIAN) {
3358 val = bswap16(val);
3359 }
3360#endif
Peter Maydell50013112015-04-26 16:49:24 +01003361 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003362 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003363 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003364 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003365 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003366 switch (endian) {
3367 case DEVICE_LITTLE_ENDIAN:
3368 stw_le_p(ptr, val);
3369 break;
3370 case DEVICE_BIG_ENDIAN:
3371 stw_be_p(ptr, val);
3372 break;
3373 default:
3374 stw_p(ptr, val);
3375 break;
3376 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003377 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003378 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003379 }
Peter Maydell50013112015-04-26 16:49:24 +01003380 if (result) {
3381 *result = r;
3382 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003383 if (release_lock) {
3384 qemu_mutex_unlock_iothread();
3385 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003386 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003387}
3388
3389void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3390 MemTxAttrs attrs, MemTxResult *result)
3391{
3392 address_space_stw_internal(as, addr, val, attrs, result,
3393 DEVICE_NATIVE_ENDIAN);
3394}
3395
3396void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3397 MemTxAttrs attrs, MemTxResult *result)
3398{
3399 address_space_stw_internal(as, addr, val, attrs, result,
3400 DEVICE_LITTLE_ENDIAN);
3401}
3402
3403void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3404 MemTxAttrs attrs, MemTxResult *result)
3405{
3406 address_space_stw_internal(as, addr, val, attrs, result,
3407 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003408}
3409
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003410void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003411{
Peter Maydell50013112015-04-26 16:49:24 +01003412 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003413}
3414
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003415void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003416{
Peter Maydell50013112015-04-26 16:49:24 +01003417 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003418}
3419
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003420void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003421{
Peter Maydell50013112015-04-26 16:49:24 +01003422 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003423}
3424
bellardaab33092005-10-30 20:48:42 +00003425/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003426void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3427 MemTxAttrs attrs, MemTxResult *result)
3428{
3429 MemTxResult r;
3430 val = tswap64(val);
3431 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3432 if (result) {
3433 *result = r;
3434 }
3435}
3436
3437void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3438 MemTxAttrs attrs, MemTxResult *result)
3439{
3440 MemTxResult r;
3441 val = cpu_to_le64(val);
3442 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3443 if (result) {
3444 *result = r;
3445 }
3446}
3447void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3448 MemTxAttrs attrs, MemTxResult *result)
3449{
3450 MemTxResult r;
3451 val = cpu_to_be64(val);
3452 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3453 if (result) {
3454 *result = r;
3455 }
3456}
3457
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003458void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003459{
Peter Maydell50013112015-04-26 16:49:24 +01003460 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003461}
3462
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003463void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003464{
Peter Maydell50013112015-04-26 16:49:24 +01003465 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003466}
3467
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003468void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003469{
Peter Maydell50013112015-04-26 16:49:24 +01003470 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003471}
3472
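/*
 * Illustrative sketch, not part of the original file: the st*_phys wrappers
 * above are the convenience form for callers that do not care about the
 * transaction result.  The descriptor-address field written here is
 * hypothetical.
 */
static inline void example_write_desc_addr(AddressSpace *as, hwaddr desc,
                                           uint64_t ring_base)
{
    /* assumption: the guest expects this field in little-endian order */
    stq_le_phys(as, desc, ring_base);
}
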
aliguori5e2972f2009-03-28 17:51:36 +00003473/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003474int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003475 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003476{
3477 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003478 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003479 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003480
3481 while (len > 0) {
3482 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003483 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003484 /* if no physical page mapped, return an error */
3485 if (phys_addr == -1)
3486 return -1;
3487 l = (page + TARGET_PAGE_SIZE) - addr;
3488 if (l > len)
3489 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003490 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003491 if (is_write) {
3492 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3493 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003494 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3495 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003496 }
bellard13eb76e2004-01-24 15:23:36 +00003497 len -= l;
3498 buf += l;
3499 addr += l;
3500 }
3501 return 0;
3502}
Paul Brooka68fe892010-03-01 00:08:59 +00003503#endif
bellard13eb76e2004-01-24 15:23:36 +00003504
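/*
 * Illustrative sketch, not part of the original file: a gdbstub-style
 * caller reading guest virtual memory through the debug accessor above.
 * The helper name and buffer handling are hypothetical.
 */
static inline int example_debug_read(CPUState *cpu, target_ulong vaddr,
                                     uint8_t *buf, int len)
{
    /* returns 0 on success, -1 if a page in the range is unmapped */
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0 /* is_write */);
}
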
Blue Swirl8e4a4242013-01-06 18:30:17 +00003505/*
3506 * A helper function for the _utterly broken_ virtio device model to find out if
3507 * it's running on a big endian machine. Don't do this at home kids!
3508 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003509bool target_words_bigendian(void);
3510bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003511{
3512#if defined(TARGET_WORDS_BIGENDIAN)
3513 return true;
3514#else
3515 return false;
3516#endif
3517}
3518
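/*
 * Illustrative sketch, not part of the original file: the kind of default
 * endianness selection a legacy virtio device model derives from the
 * predicate above.  The enum and helper names are hypothetical.
 */
typedef enum {
    EXAMPLE_DEVICE_ENDIAN_LITTLE,
    EXAMPLE_DEVICE_ENDIAN_BIG,
} ExampleDeviceEndian;

static inline ExampleDeviceEndian example_default_device_endian(void)
{
    return target_words_bigendian() ? EXAMPLE_DEVICE_ENDIAN_BIG
                                    : EXAMPLE_DEVICE_ENDIAN_LITTLE;
}
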
Wen Congyang76f35532012-05-07 12:04:18 +08003519#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003520bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003521{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003522 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003523 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003524 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003525
Paolo Bonzini41063e12015-03-18 14:21:43 +01003526 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003527 mr = address_space_translate(&address_space_memory,
3528 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003529
Paolo Bonzini41063e12015-03-18 14:21:43 +01003530 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3531 rcu_read_unlock();
3532 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003533}
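
/*
 * Illustrative sketch, not part of the original file: a guest-memory-dump
 * style caller might use the predicate above to skip pages backed by MMIO
 * rather than RAM or ROM.  The helper name is hypothetical.
 */
static inline bool example_page_is_dumpable(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr);
}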
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003534
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003535int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003536{
3537 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003538 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003539
Mike Day0dc3f442013-09-05 14:41:35 -04003540 rcu_read_lock();
3541 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003542 ret = func(block->idstr, block->host, block->offset,
3543 block->used_length, opaque);
3544 if (ret) {
3545 break;
3546 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003547 }
Mike Day0dc3f442013-09-05 14:41:35 -04003548 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003549 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003550}
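
/*
 * Illustrative sketch, not part of the original file: a RAMBlockIterFunc
 * callback that sums the used length of every RAM block, the way a
 * migration- or dump-style caller might size its output.  The helper
 * names are hypothetical.
 */
static int example_add_block_length(const char *block_name, void *host_addr,
                                    ram_addr_t offset, ram_addr_t length,
                                    void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0; /* zero means: keep iterating */
}

static uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_add_block_length, &total);
    return total;
}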
Peter Maydellec3f8c92013-06-27 20:53:38 +01003551#endif