/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file. */
#define RAM_FILE (1 << 3)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
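/* For example, with a 12-bit target page size this works out to
 * ((64 - 12 - 1) / 9) + 1 = 6 levels of page tables.
 */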

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

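/* Make sure the node array can hold at least "nodes" more entries. */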
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

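/* Allocate one node and initialize every entry: leaves point at the
 * unassigned section, interior nodes at PHYS_MAP_NODE_NIL.
 */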
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

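/* Fill the subtree for pages [*index, *index + *nb) at the given level,
 * recursing into lower levels for ranges that do not cover a whole
 * aligned step.
 */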
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

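/* Point "nb" pages starting at "index" to the section number "leaf". */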
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

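/* Walk the radix tree and return the section covering "addr", or the
 * unassigned section if no mapping exists.
 */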
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

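/* Translate an address within "as" into a MemoryRegion and an offset,
 * following IOMMU mappings until a terminal region is reached.
 */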
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

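/* Return the CPU with the given index, or NULL if it does not exist. */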
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment. */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

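/* Register a new CPU: allocate its index, add it to the global CPU list
 * and wire up its vmstate so it can be migrated.
 */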
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

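/* Flush the translated code containing "pc" so a new breakpoint takes effect. */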
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

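/* Report a fatal error on stderr and in the log, then abort QEMU. */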
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
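/* Find the RAMBlock that contains "addr"; the result is cached in
 * ram_list.mru_block for the next lookup.
 */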
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock. This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here. The block was already published
     * when it was placed into the list. Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

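/* Reset the write-tracking TLB entries of every CPU for the given RAM range. */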
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
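/* Build the iotlb entry for a section: RAM uses its ram_addr (plus the
 * notdirty/ROM slow-path markers), MMIO uses the section index; pages with
 * a matching watchpoint are redirected to the watchpoint handler.
 */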
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

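/* Append a copy of "section" to the map, taking a reference on its
 * MemoryRegion, and return the new section number.
 */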
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries. Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

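/* Listener callback for a new section: unaligned head and tail pieces go
 * through the subpage machinery, the page-aligned middle is registered as
 * full pages.
 */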
Avi Kivityac1970f2012-10-03 16:22:53 +02001127static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001128{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001129 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001130 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001131 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001132 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001133
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001134 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1135 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1136 - now.offset_within_address_space;
1137
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001138 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001139 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001140 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001141 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001142 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 while (int128_ne(remain.size, now.size)) {
1144 remain.size = int128_sub(remain.size, now.size);
1145 remain.offset_within_address_space += int128_get64(now.size);
1146 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001147 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001148 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001149 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001150 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001151 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001152 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001153 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001154 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001155 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001156 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001157 }
1158}
1159
Sheng Yang62a27442010-01-26 19:21:16 +08001160void qemu_flush_coalesced_mmio_buffer(void)
1161{
1162 if (kvm_enabled())
1163 kvm_flush_coalesced_mmio_buffer();
1164}
1165
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001166void qemu_mutex_lock_ramlist(void)
1167{
1168 qemu_mutex_lock(&ram_list.mutex);
1169}
1170
1171void qemu_mutex_unlock_ramlist(void)
1172{
1173 qemu_mutex_unlock(&ram_list.mutex);
1174}
1175
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001176#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001177
1178#include <sys/vfs.h>
1179
1180#define HUGETLBFS_MAGIC 0x958458f6
1181
Hu Taofc7a5802014-09-09 13:28:01 +08001182static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001183{
1184 struct statfs fs;
1185 int ret;
1186
1187 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001188 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001189 } while (ret != 0 && errno == EINTR);
1190
1191 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001192 error_setg_errno(errp, errno, "failed to get page size of file %s",
1193 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001194 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001195 }
1196
1197 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001198 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199
1200 return fs.f_bsize;
1201}
1202
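/* Back a RAM block with a file created under @path (typically a hugetlbfs
 * mount): validate the filesystem page size, create and immediately unlink
 * a temporary file, grow it with ftruncate(), map it with qemu_ram_mmap(),
 * and touch every page up front when mem_prealloc is set. */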
Alex Williamson04b16652010-07-02 11:13:17 -06001203static void *file_ram_alloc(RAMBlock *block,
1204 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001205 const char *path,
1206 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001207{
1208 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001209 char *sanitized_name;
1210 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001211 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001212 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001213 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001214 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001215
Hu Taofc7a5802014-09-09 13:28:01 +08001216 hpagesize = gethugepagesize(path, &local_err);
1217 if (local_err) {
1218 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001219 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001220 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001221 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001222
1223 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001224 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1225 "or larger than huge page size 0x%" PRIx64,
1226 memory, hpagesize);
1227 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001228 }
1229
1230 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001231 error_setg(errp,
1232 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001233 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001234 }
1235
Peter Feiner8ca761f2013-03-04 13:54:25 -05001236 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001237 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001238 for (c = sanitized_name; *c != '\0'; c++) {
1239 if (*c == '/')
1240 *c = '_';
1241 }
1242
1243 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1244 sanitized_name);
1245 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001246
1247 fd = mkstemp(filename);
1248 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001249 error_setg_errno(errp, errno,
1250 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001251 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001252 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001253 }
1254 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001255 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001256
Chen Hanxiao9284f312015-07-24 11:12:03 +08001257 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001258
1259 /*
1260 * ftruncate is not supported by hugetlbfs in older
1261 * hosts, so don't bother bailing out on errors.
1262 * If anything goes wrong with it under other filesystems,
1263 * mmap will fail.
1264 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001265 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001266 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001267 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001268
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001269 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001270 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001271 error_setg_errno(errp, errno,
1272 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001273 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001274 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001275 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001276
1277 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001278 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001279 }
1280
Alex Williamson04b16652010-07-02 11:13:17 -06001281 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001282 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001283
1284error:
1285 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001286 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001287 exit(1);
1288 }
1289 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001290}
1291#endif
1292
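/* Pick the offset for a new RAM block: scan the existing blocks and return
 * the start of the smallest gap large enough to hold @size, aborting if no
 * such gap exists. */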
Mike Day0dc3f442013-09-05 14:41:35 -04001293/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001294static ram_addr_t find_ram_offset(ram_addr_t size)
1295{
Alex Williamson04b16652010-07-02 11:13:17 -06001296 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001297 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001298
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001299 assert(size != 0); /* it would hand out same offset multiple times */
1300
Mike Day0dc3f442013-09-05 14:41:35 -04001301 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001302 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001303 }
Alex Williamson04b16652010-07-02 11:13:17 -06001304
Mike Day0dc3f442013-09-05 14:41:35 -04001305 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001306 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001307
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001308 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001309
Mike Day0dc3f442013-09-05 14:41:35 -04001310 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001311 if (next_block->offset >= end) {
1312 next = MIN(next, next_block->offset);
1313 }
1314 }
1315 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001316 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001317 mingap = next - end;
1318 }
1319 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001320
1321 if (offset == RAM_ADDR_MAX) {
1322 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1323 (uint64_t)size);
1324 abort();
1325 }
1326
Alex Williamson04b16652010-07-02 11:13:17 -06001327 return offset;
1328}
1329
Juan Quintela652d7ec2012-07-20 10:37:54 +02001330ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001331{
Alex Williamsond17b5282010-06-25 11:08:38 -06001332 RAMBlock *block;
1333 ram_addr_t last = 0;
1334
Mike Day0dc3f442013-09-05 14:41:35 -04001335 rcu_read_lock();
1336 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001337 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001338 }
Mike Day0dc3f442013-09-05 14:41:35 -04001339 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001340 return last;
1341}
1342
Jason Baronddb97f12012-08-02 15:44:16 -04001343static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1344{
1345 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001346
1347 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001348 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001349 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1350 if (ret) {
1351 perror("qemu_madvise");
1352 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1353 "but dump_guest_core=off specified\n");
1354 }
1355 }
1356}
1357
Mike Day0dc3f442013-09-05 14:41:35 -04001358/* Called within an RCU critical section, or while the ramlist lock
1359 * is held.
1360 */
Hu Tao20cfe882014-04-02 15:13:26 +08001361static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001362{
Hu Tao20cfe882014-04-02 15:13:26 +08001363 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001364
Mike Day0dc3f442013-09-05 14:41:35 -04001365 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001366 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001367 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001368 }
1369 }
Hu Tao20cfe882014-04-02 15:13:26 +08001370
1371 return NULL;
1372}
1373
Mike Dayae3a7042013-09-05 14:41:35 -04001374/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001375void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1376{
Mike Dayae3a7042013-09-05 14:41:35 -04001377 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001378
Mike Day0dc3f442013-09-05 14:41:35 -04001379 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001380 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001381 assert(new_block);
1382 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001383
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001384 if (dev) {
1385 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001386 if (id) {
1387 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001388 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001389 }
1390 }
1391 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1392
Mike Day0dc3f442013-09-05 14:41:35 -04001393 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001394 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001395 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1396 new_block->idstr);
1397 abort();
1398 }
1399 }
Mike Day0dc3f442013-09-05 14:41:35 -04001400 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001401}
1402
Mike Dayae3a7042013-09-05 14:41:35 -04001403/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001404void qemu_ram_unset_idstr(ram_addr_t addr)
1405{
Mike Dayae3a7042013-09-05 14:41:35 -04001406 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001407
Mike Dayae3a7042013-09-05 14:41:35 -04001408 /* FIXME: arch_init.c assumes that this is not called during
1409 * migration. Ignore the problem since hot-unplug during migration
1410 * does not work anyway.
1411 */
1412
Mike Day0dc3f442013-09-05 14:41:35 -04001413 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001414 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001415 if (block) {
1416 memset(block->idstr, 0, sizeof(block->idstr));
1417 }
Mike Day0dc3f442013-09-05 14:41:35 -04001418 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001419}
1420
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001421static int memory_try_enable_merging(void *addr, size_t len)
1422{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001423 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001424 /* disabled by the user */
1425 return 0;
1426 }
1427
1428 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1429}
1430
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001431/* Only legal before guest might have detected the memory size: e.g. on
1432 * incoming migration, or right after reset.
1433 *
1434 * As memory core doesn't know how is memory accessed, it is up to
1435 * resize callback to update device state and/or add assertions to detect
1436 * misuse, if necessary.
1437 */
1438int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1439{
1440 RAMBlock *block = find_ram_block(base);
1441
1442 assert(block);
1443
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001444 newsize = TARGET_PAGE_ALIGN(newsize);
1445
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001446 if (block->used_length == newsize) {
1447 return 0;
1448 }
1449
1450 if (!(block->flags & RAM_RESIZEABLE)) {
1451 error_setg_errno(errp, EINVAL,
1452 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1453 " in != 0x" RAM_ADDR_FMT, block->idstr,
1454 newsize, block->used_length);
1455 return -EINVAL;
1456 }
1457
1458 if (block->max_length < newsize) {
1459 error_setg_errno(errp, EINVAL,
1460 "Length too large: %s: 0x" RAM_ADDR_FMT
1461 " > 0x" RAM_ADDR_FMT, block->idstr,
1462 newsize, block->max_length);
1463 return -EINVAL;
1464 }
1465
1466 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1467 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001468 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1469 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001470 memory_region_set_size(block->mr, newsize);
1471 if (block->resized) {
1472 block->resized(block->idstr, newsize, block->host);
1473 }
1474 return 0;
1475}
1476
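/* Common tail of all the RAM allocation paths: assign an offset, allocate
 * host memory if the caller did not provide any (Xen allocates through
 * xen_ram_alloc() instead), insert the block into the size-sorted RCU list,
 * grow the dirty bitmaps and mark the whole new range dirty. */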
Hu Taoef701d72014-09-09 13:27:54 +08001477static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001478{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001479 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001480 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001481 ram_addr_t old_ram_size, new_ram_size;
1482
1483 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001484
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001485 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001486 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001487
1488 if (!new_block->host) {
1489 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001490 xen_ram_alloc(new_block->offset, new_block->max_length,
1491 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001492 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001493 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001494 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001495 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001496 error_setg_errno(errp, errno,
1497 "cannot set up guest memory '%s'",
1498 memory_region_name(new_block->mr));
1499 qemu_mutex_unlock_ramlist();
1500 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001501 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001502 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001503 }
1504 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001505
Li Zhijiandd631692015-07-02 20:18:06 +08001506 new_ram_size = MAX(old_ram_size,
1507 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1508 if (new_ram_size > old_ram_size) {
1509 migration_bitmap_extend(old_ram_size, new_ram_size);
1510 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001511 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1512 * QLIST (which has an RCU-friendly variant) does not have insertion at
1513 * tail, so save the last element in last_block.
1514 */
Mike Day0dc3f442013-09-05 14:41:35 -04001515 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001516 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001517 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001518 break;
1519 }
1520 }
1521 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001522 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001523 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001524 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001525 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001526 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001527 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001528 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001529
Mike Day0dc3f442013-09-05 14:41:35 -04001530 /* Write list before version */
1531 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001532 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001533 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001534
Juan Quintela2152f5c2013-10-08 13:52:02 +02001535 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1536
1537 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001538 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001539
1540 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001541 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1542 ram_list.dirty_memory[i] =
1543 bitmap_zero_extend(ram_list.dirty_memory[i],
1544 old_ram_size, new_ram_size);
1545 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001546 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001547 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001548 new_block->used_length,
1549 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001550
Paolo Bonzinia904c912015-01-21 16:18:35 +01001551 if (new_block->host) {
1552 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1553 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1554 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1555 if (kvm_enabled()) {
1556 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1557 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001558 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001559
1560 return new_block->offset;
1561}
1562
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001563#ifdef __linux__
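/* Allocate a RAM block backed by a file under @mem_path (see
 * file_ram_alloc() above).  Refused when Xen is enabled or when a
 * non-default phys_mem_alloc hook is installed. */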
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001564ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001565 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001566 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001567{
1568 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001569 ram_addr_t addr;
1570 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001571
1572 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001573 error_setg(errp, "-mem-path not supported with Xen");
1574 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001575 }
1576
1577 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1578 /*
1579 * file_ram_alloc() needs to allocate just like
1580 * phys_mem_alloc, but we haven't bothered to provide
1581 * a hook there.
1582 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001583 error_setg(errp,
1584 "-mem-path not supported with this accelerator");
1585 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001586 }
1587
1588 size = TARGET_PAGE_ALIGN(size);
1589 new_block = g_malloc0(sizeof(*new_block));
1590 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001591 new_block->used_length = size;
1592 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001593 new_block->flags = share ? RAM_SHARED : 0;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001594 new_block->flags |= RAM_FILE;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001595 new_block->host = file_ram_alloc(new_block, size,
1596 mem_path, errp);
1597 if (!new_block->host) {
1598 g_free(new_block);
1599 return -1;
1600 }
1601
Hu Taoef701d72014-09-09 13:27:54 +08001602 addr = ram_block_add(new_block, &local_err);
1603 if (local_err) {
1604 g_free(new_block);
1605 error_propagate(errp, local_err);
1606 return -1;
1607 }
1608 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001609}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001610#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001611
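/* Worker shared by the qemu_ram_alloc_*() variants below: build a RAMBlock
 * that is either backed by caller-provided memory (@host), resizeable up to
 * @max_size, or a plain fixed-size anonymous allocation, and hand it to
 * ram_block_add(). */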
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001612static
1613ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1614 void (*resized)(const char*,
1615 uint64_t length,
1616 void *host),
1617 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001618 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001619{
1620 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001621 ram_addr_t addr;
1622 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001623
1624 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001625 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001626 new_block = g_malloc0(sizeof(*new_block));
1627 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001628 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001629 new_block->used_length = size;
1630 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001631 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001632 new_block->fd = -1;
1633 new_block->host = host;
1634 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001635 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001637 if (resizeable) {
1638 new_block->flags |= RAM_RESIZEABLE;
1639 }
Hu Taoef701d72014-09-09 13:27:54 +08001640 addr = ram_block_add(new_block, &local_err);
1641 if (local_err) {
1642 g_free(new_block);
1643 error_propagate(errp, local_err);
1644 return -1;
1645 }
1646 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001647}
1648
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001649ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1650 MemoryRegion *mr, Error **errp)
1651{
1652 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1653}
1654
Hu Taoef701d72014-09-09 13:27:54 +08001655ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001656{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001657 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1658}
1659
1660ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1661 void (*resized)(const char*,
1662 uint64_t length,
1663 void *host),
1664 MemoryRegion *mr, Error **errp)
1665{
1666 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001667}
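/* Usage sketch (illustrative only, not code from this file): a caller that
 * wants memory it can grow later pairs the two interfaces above, roughly:
 *
 *     ram_addr_t base = qemu_ram_alloc_resizeable(init_size, max_size,
 *                                                 resized_cb, mr, &err);
 *     ...
 *     qemu_ram_resize(base, new_size, &err);
 *
 * with resized_cb invoked from qemu_ram_resize() once the block has grown.
 */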
bellarde9a1ab12007-02-08 23:08:38 +00001668
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001669void qemu_ram_free_from_ptr(ram_addr_t addr)
1670{
1671 RAMBlock *block;
1672
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001673 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001674 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001675 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001676 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001677 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001678 /* Write list before version */
1679 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001680 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001681 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001682 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001683 }
1684 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001685 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001686}
1687
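/* RCU reclaim function for a RAMBlock: release the host memory the same way
 * it was allocated (nothing for RAM_PREALLOC, Xen map-cache invalidation,
 * munmap for file-backed blocks, qemu_anon_ram_free() otherwise) and free
 * the block itself. */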
Paolo Bonzini43771532013-09-09 17:58:40 +02001688static void reclaim_ramblock(RAMBlock *block)
1689{
1690 if (block->flags & RAM_PREALLOC) {
1691 ;
1692 } else if (xen_enabled()) {
1693 xen_invalidate_map_cache_entry(block->host);
1694#ifndef _WIN32
1695 } else if (block->fd >= 0) {
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001696 if (block->flags & RAM_FILE) {
1697 qemu_ram_munmap(block->host, block->max_length);
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001698 } else {
1699 munmap(block->host, block->max_length);
1700 }
Paolo Bonzini43771532013-09-09 17:58:40 +02001701 close(block->fd);
1702#endif
1703 } else {
1704 qemu_anon_ram_free(block->host, block->max_length);
1705 }
1706 g_free(block);
1707}
1708
Anthony Liguoric227f092009-10-01 16:12:16 -05001709void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001710{
Alex Williamson04b16652010-07-02 11:13:17 -06001711 RAMBlock *block;
1712
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001713 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001714 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001715 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001716 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001717 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001718 /* Write list before version */
1719 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001720 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001721 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001722 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001723 }
1724 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001725 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001726}
1727
Huang Yingcd19cfa2011-03-02 08:56:19 +01001728#ifndef _WIN32
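/* Re-establish the host mapping of [addr, addr + length) inside the RAM
 * block that contains it, using mmap(MAP_FIXED) with flags matching how the
 * block was originally allocated. */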
1729void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1730{
1731 RAMBlock *block;
1732 ram_addr_t offset;
1733 int flags;
1734 void *area, *vaddr;
1735
Mike Day0dc3f442013-09-05 14:41:35 -04001736 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001737 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001738 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001739 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001740 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001741 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001742 } else if (xen_enabled()) {
1743 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001744 } else {
1745 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001746 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001747 flags |= (block->flags & RAM_SHARED ?
1748 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001749 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1750 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001751 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001752 /*
1753 * Remap needs to match alloc. Accelerators that
1754 * set phys_mem_alloc never remap. If they did,
1755 * we'd need a remap hook here.
1756 */
1757 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1758
Huang Yingcd19cfa2011-03-02 08:56:19 +01001759 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1760 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1761 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001762 }
1763 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001764 fprintf(stderr, "Could not remap addr: "
1765 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001766 length, addr);
1767 exit(1);
1768 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001769 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001770 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001771 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001772 }
1773 }
1774}
1775#endif /* !_WIN32 */
1776
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001777int qemu_get_ram_fd(ram_addr_t addr)
1778{
Mike Dayae3a7042013-09-05 14:41:35 -04001779 RAMBlock *block;
1780 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001781
Mike Day0dc3f442013-09-05 14:41:35 -04001782 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001783 block = qemu_get_ram_block(addr);
1784 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001785 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001786 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001787}
1788
Damjan Marion3fd74b82014-06-26 23:01:32 +02001789void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1790{
Mike Dayae3a7042013-09-05 14:41:35 -04001791 RAMBlock *block;
1792 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001793
Mike Day0dc3f442013-09-05 14:41:35 -04001794 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001795 block = qemu_get_ram_block(addr);
1796 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001797 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001798 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001799}
1800
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001801/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001802 * This should not be used for general purpose DMA. Use address_space_map
1803 * or address_space_rw instead. For local memory (e.g. video ram) that the
1804 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001805 *
1806 * By the time this function returns, the returned pointer is not protected
1807 * by RCU anymore. If the caller is not within an RCU critical section and
1808 * does not hold the iothread lock, it must have other means of protecting the
1809 * pointer, such as a reference to the region that includes the incoming
1810 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001811 */
1812void *qemu_get_ram_ptr(ram_addr_t addr)
1813{
Mike Dayae3a7042013-09-05 14:41:35 -04001814 RAMBlock *block;
1815 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001816
Mike Day0dc3f442013-09-05 14:41:35 -04001817 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001818 block = qemu_get_ram_block(addr);
1819
1820 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001821 /* We need to check if the requested address is in the RAM
1822 * because we don't want to map the entire memory in QEMU.
1823 * In that case just map until the end of the page.
1824 */
1825 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001826 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001827 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001828 }
Mike Dayae3a7042013-09-05 14:41:35 -04001829
1830 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001831 }
Mike Dayae3a7042013-09-05 14:41:35 -04001832 ptr = ramblock_ptr(block, addr - block->offset);
1833
Mike Day0dc3f442013-09-05 14:41:35 -04001834unlock:
1835 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001836 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001837}
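/* Usage sketch (illustrative only): a caller that holds neither the
 * iothread lock nor a reference to the region typically pins the pointer
 * with its own RCU critical section for the duration of the access:
 *
 *     rcu_read_lock();
 *     void *p = qemu_get_ram_ptr(addr);
 *     memcpy(buf, p, len);
 *     rcu_read_unlock();
 */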
1838
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001839/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001840 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001841 *
1842 * By the time this function returns, the returned pointer is not protected
1843 * by RCU anymore. If the caller is not within an RCU critical section and
1844 * does not hold the iothread lock, it must have other means of protecting the
1845 * pointer, such as a reference to the region that includes the incoming
1846 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001847 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001848static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001849{
Mike Dayae3a7042013-09-05 14:41:35 -04001850 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001851 if (*size == 0) {
1852 return NULL;
1853 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001854 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001855 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001856 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001857 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001858 rcu_read_lock();
1859 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001860 if (addr - block->offset < block->max_length) {
1861 if (addr - block->offset + *size > block->max_length)
1862 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001863 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001864 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001865 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001866 }
1867 }
1868
1869 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1870 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001871 }
1872}
1873
Paolo Bonzini7443b432013-06-03 12:44:02 +02001874/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001875 * (typically a TLB entry) back to a ram offset.
1876 *
1877 * By the time this function returns, the returned pointer is not protected
1878 * by RCU anymore. If the caller is not within an RCU critical section and
1879 * does not hold the iothread lock, it must have other means of protecting the
1880 * pointer, such as a reference to the region that includes the incoming
1881 * ram_addr_t.
1882 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001883MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001884{
pbrook94a6b542009-04-11 17:15:54 +00001885 RAMBlock *block;
1886 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001887 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001888
Jan Kiszka868bb332011-06-21 22:59:09 +02001889 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001890 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001891 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001892 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001893 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001894 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001895 }
1896
Mike Day0dc3f442013-09-05 14:41:35 -04001897 rcu_read_lock();
1898 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001899 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001900 goto found;
1901 }
1902
Mike Day0dc3f442013-09-05 14:41:35 -04001903 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001904 /* This case happens when the block is not mapped. */
1905 if (block->host == NULL) {
1906 continue;
1907 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001908 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001909 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001910 }
pbrook94a6b542009-04-11 17:15:54 +00001911 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001912
Mike Day0dc3f442013-09-05 14:41:35 -04001913 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001914 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001915
1916found:
1917 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001918 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001919 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001920 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001921}
Alex Williamsonf471a172010-06-11 11:11:42 -06001922
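/* The notdirty MMIO region catches the first write to a page whose code
 * dirty bit is still clear: invalidate any TBs for that page, perform the
 * store, mark the page dirty for VGA/migration, and let the TLB allow
 * direct writes again once the page is no longer clean. */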
Avi Kivitya8170e52012-10-23 12:30:10 +02001923static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001924 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001925{
Juan Quintela52159192013-10-08 12:44:04 +02001926 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001927 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001928 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001929 switch (size) {
1930 case 1:
1931 stb_p(qemu_get_ram_ptr(ram_addr), val);
1932 break;
1933 case 2:
1934 stw_p(qemu_get_ram_ptr(ram_addr), val);
1935 break;
1936 case 4:
1937 stl_p(qemu_get_ram_ptr(ram_addr), val);
1938 break;
1939 default:
1940 abort();
1941 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001942 /* Set both VGA and migration bits for simplicity and to remove
1943 * the notdirty callback faster.
1944 */
1945 cpu_physical_memory_set_dirty_range(ram_addr, size,
1946 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001947 /* we remove the notdirty callback only if the code has been
1948 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001949 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001950 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001951 }
bellard1ccde1c2004-02-06 19:46:14 +00001952}
1953
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001954static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1955 unsigned size, bool is_write)
1956{
1957 return is_write;
1958}
1959
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001960static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001961 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001962 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001963 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001964};
1965
pbrook0f459d12008-06-09 00:20:13 +00001966/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001967static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001968{
Andreas Färber93afead2013-08-26 03:41:01 +02001969 CPUState *cpu = current_cpu;
1970 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001971 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001972 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001973 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001974 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001975
Andreas Färberff4700b2013-08-26 18:23:18 +02001976 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001977 /* We re-entered the check after replacing the TB. Now raise
1978 * the debug interrupt so that it will trigger after the
1979 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001980 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001981 return;
1982 }
Andreas Färber93afead2013-08-26 03:41:01 +02001983 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001984 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001985 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1986 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001987 if (flags == BP_MEM_READ) {
1988 wp->flags |= BP_WATCHPOINT_HIT_READ;
1989 } else {
1990 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1991 }
1992 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001993 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001994 if (!cpu->watchpoint_hit) {
1995 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001996 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001997 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001998 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001999 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002000 } else {
2001 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002002 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002003 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002004 }
aliguori06d55cc2008-11-18 20:24:06 +00002005 }
aliguori6e140f22008-11-18 20:37:55 +00002006 } else {
2007 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002008 }
2009 }
2010}
2011
pbrook6658ffb2007-03-16 23:58:11 +00002012/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2013 so these check for a hit then pass through to the normal out-of-line
2014 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002015static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2016 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002017{
Peter Maydell66b9b432015-04-26 16:49:24 +01002018 MemTxResult res;
2019 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002020
Peter Maydell66b9b432015-04-26 16:49:24 +01002021 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002022 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002023 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002024 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002025 break;
2026 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002027 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002028 break;
2029 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002030 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002031 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002032 default: abort();
2033 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002034 *pdata = data;
2035 return res;
2036}
2037
2038static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2039 uint64_t val, unsigned size,
2040 MemTxAttrs attrs)
2041{
2042 MemTxResult res;
2043
2044 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2045 switch (size) {
2046 case 1:
2047 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2048 break;
2049 case 2:
2050 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2051 break;
2052 case 4:
2053 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2054 break;
2055 default: abort();
2056 }
2057 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002058}
2059
Avi Kivity1ec9b902012-01-02 12:47:48 +02002060static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002061 .read_with_attrs = watch_mem_read,
2062 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002063 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002064};
pbrook6658ffb2007-03-16 23:58:11 +00002065
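/* Sub-page accesses are bounced through a small local buffer and forwarded
 * to the owning address space at subpage->base + addr, so that regions
 * smaller than a target page can share a single page. */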
Peter Maydellf25a49e2015-04-26 16:49:24 +01002066static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2067 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002068{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002069 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002070 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002071 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002072
blueswir1db7b5422007-05-26 17:36:03 +00002073#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002074 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002075 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002076#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002077 res = address_space_read(subpage->as, addr + subpage->base,
2078 attrs, buf, len);
2079 if (res) {
2080 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002081 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002082 switch (len) {
2083 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002084 *data = ldub_p(buf);
2085 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002086 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002087 *data = lduw_p(buf);
2088 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002089 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002090 *data = ldl_p(buf);
2091 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002092 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002093 *data = ldq_p(buf);
2094 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002095 default:
2096 abort();
2097 }
blueswir1db7b5422007-05-26 17:36:03 +00002098}
2099
Peter Maydellf25a49e2015-04-26 16:49:24 +01002100static MemTxResult subpage_write(void *opaque, hwaddr addr,
2101 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002102{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002103 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002104 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002105
blueswir1db7b5422007-05-26 17:36:03 +00002106#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002107 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002108 " value %"PRIx64"\n",
2109 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002110#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002111 switch (len) {
2112 case 1:
2113 stb_p(buf, value);
2114 break;
2115 case 2:
2116 stw_p(buf, value);
2117 break;
2118 case 4:
2119 stl_p(buf, value);
2120 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002121 case 8:
2122 stq_p(buf, value);
2123 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002124 default:
2125 abort();
2126 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002127 return address_space_write(subpage->as, addr + subpage->base,
2128 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002129}
2130
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002131static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002132 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002133{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002134 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002135#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002136 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002137 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002138#endif
2139
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002140 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002141 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002142}
2143
Avi Kivity70c68e42012-01-02 12:32:48 +02002144static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002145 .read_with_attrs = subpage_read,
2146 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002147 .impl.min_access_size = 1,
2148 .impl.max_access_size = 8,
2149 .valid.min_access_size = 1,
2150 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002151 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002152 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002153};
2154
Anthony Liguoric227f092009-10-01 16:12:16 -05002155static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002156 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002157{
2158 int idx, eidx;
2159
2160 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2161 return -1;
2162 idx = SUBPAGE_IDX(start);
2163 eidx = SUBPAGE_IDX(end);
2164#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002165 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2166 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002167#endif
blueswir1db7b5422007-05-26 17:36:03 +00002168 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002169 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002170 }
2171
2172 return 0;
2173}
2174
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002175static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002176{
Anthony Liguoric227f092009-10-01 16:12:16 -05002177 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002178
Anthony Liguori7267c092011-08-20 22:09:37 -05002179 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002180
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002181 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002182 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002183 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002184 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002185 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002186#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002187 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2188 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002189#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002190 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002191
2192 return mmio;
2193}
2194
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002195static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2196 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002197{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002198 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002199 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002200 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002201 .mr = mr,
2202 .offset_within_address_space = 0,
2203 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002204 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002205 };
2206
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002207 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002208}
2209
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002210MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002211{
Peter Maydell32857f42015-10-01 15:29:50 +01002212 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2213 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002214 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002215
2216 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002217}
2218
Avi Kivitye9179ce2009-06-14 11:38:52 +03002219static void io_mem_init(void)
2220{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002221 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002222 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002223 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002224 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002225 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002226 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002227 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002228}
2229
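/* Begin building the next dispatch table for an address space.  The four
 * dummy sections must be added in this order so that their indices match
 * the fixed PHYS_SECTION_UNASSIGNED/NOTDIRTY/ROM/WATCH values (checked by
 * the asserts below). */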
Avi Kivityac1970f2012-10-03 16:22:53 +02002230static void mem_begin(MemoryListener *listener)
2231{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002232 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002233 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2234 uint16_t n;
2235
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002236 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002237 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002238 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002239 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002240 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002241 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002242 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002243 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002244
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002245 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002246 d->as = as;
2247 as->next_dispatch = d;
2248}
2249
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002250static void address_space_dispatch_free(AddressSpaceDispatch *d)
2251{
2252 phys_sections_free(&d->map);
2253 g_free(d);
2254}
2255
Paolo Bonzini00752702013-05-29 12:13:54 +02002256static void mem_commit(MemoryListener *listener)
2257{
2258 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002259 AddressSpaceDispatch *cur = as->dispatch;
2260 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002261
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002262 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002263
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002264 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002265 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002266 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002267 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002268}
2269
Avi Kivity1d711482012-10-02 18:54:45 +02002270static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002271{
Peter Maydell32857f42015-10-01 15:29:50 +01002272 CPUAddressSpace *cpuas;
2273 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002274
2275 /* since each CPU stores ram addresses in its TLB cache, we must
2276 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002277 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2278 cpu_reloading_memory_map();
2279 /* The CPU and TLB are protected by the iothread lock.
2280 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2281 * may have split the RCU critical section.
2282 */
2283 d = atomic_rcu_read(&cpuas->as->dispatch);
2284 cpuas->memory_dispatch = d;
2285 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002286}
2287
Avi Kivityac1970f2012-10-03 16:22:53 +02002288void address_space_init_dispatch(AddressSpace *as)
2289{
Paolo Bonzini00752702013-05-29 12:13:54 +02002290 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002291 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002292 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002293 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002294 .region_add = mem_add,
2295 .region_nop = mem_add,
2296 .priority = 0,
2297 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002298 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002299}
2300
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002301void address_space_unregister(AddressSpace *as)
2302{
2303 memory_listener_unregister(&as->dispatch_listener);
2304}
2305
Avi Kivity83f3c252012-10-07 12:59:55 +02002306void address_space_destroy_dispatch(AddressSpace *as)
2307{
2308 AddressSpaceDispatch *d = as->dispatch;
2309
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002310 atomic_rcu_set(&as->dispatch, NULL);
2311 if (d) {
2312 call_rcu(d, address_space_dispatch_free, rcu);
2313 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002314}
2315
Avi Kivity62152b82011-07-26 14:26:14 +03002316static void memory_map_init(void)
2317{
Anthony Liguori7267c092011-08-20 22:09:37 -05002318 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002319
Paolo Bonzini57271d62013-11-07 17:14:37 +01002320 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002321 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002322
Anthony Liguori7267c092011-08-20 22:09:37 -05002323 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002324 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2325 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002326 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002327}
2328
2329MemoryRegion *get_system_memory(void)
2330{
2331 return system_memory;
2332}
2333
Avi Kivity309cb472011-08-08 16:09:03 +03002334MemoryRegion *get_system_io(void)
2335{
2336 return system_io;
2337}
2338
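/*
 * Illustrative sketch, not part of the original file: how board code
 * typically plugs guest RAM into the flat view built by memory_map_init()
 * above.  The function name, the "example.ram" region name, the base
 * address 0 and the use of &error_abort are assumptions for illustration
 * only (error_abort needs qapi/error.h, which board files include).
 */
static void example_board_ram_init(ram_addr_t ram_size)
{
    MemoryRegion *example_ram = g_new(MemoryRegion, 1);

    /* Allocate host memory backing the guest RAM... */
    memory_region_init_ram(example_ram, NULL, "example.ram", ram_size,
                           &error_abort);
    /* ...and make it guest-visible starting at physical address 0. */
    memory_region_add_subregion(get_system_memory(), 0, example_ram);
}
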
pbrooke2eef172008-06-08 01:09:01 +00002339#endif /* !defined(CONFIG_USER_ONLY) */
2340
bellard13eb76e2004-01-24 15:23:36 +00002341/* physical memory access (slow version, mainly for debug) */
2342#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002343int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002344 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002345{
2346 int l, flags;
2347 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002348 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002349
2350 while (len > 0) {
2351 page = addr & TARGET_PAGE_MASK;
2352 l = (page + TARGET_PAGE_SIZE) - addr;
2353 if (l > len)
2354 l = len;
2355 flags = page_get_flags(page);
2356 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002357 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002358 if (is_write) {
2359 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002360 return -1;
bellard579a97f2007-11-11 14:26:47 +00002361 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002362 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002363 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002364 memcpy(p, buf, l);
2365 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002366 } else {
2367 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002368 return -1;
bellard579a97f2007-11-11 14:26:47 +00002369 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002370 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002371 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002372 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002373 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002374 }
2375 len -= l;
2376 buf += l;
2377 addr += l;
2378 }
Paul Brooka68fe892010-03-01 00:08:59 +00002379 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002380}
bellard8df1cd02005-01-28 22:37:22 +00002381
bellard13eb76e2004-01-24 15:23:36 +00002382#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002383
Paolo Bonzini845b6212015-03-23 11:45:53 +01002384static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002385 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002386{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002387 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2388 /* No early return if dirty_log_mask is or becomes 0, because
2389 * cpu_physical_memory_set_dirty_range will still call
2390 * xen_modified_memory.
2391 */
2392 if (dirty_log_mask) {
2393 dirty_log_mask =
2394 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002395 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002396 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2397 tb_invalidate_phys_range(addr, addr + length);
2398 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2399 }
2400 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002401}
2402
Richard Henderson23326162013-07-08 14:55:59 -07002403static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002404{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002405 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002406
2407 /* Regions are assumed to support 1-4 byte accesses unless
2408 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002409 if (access_size_max == 0) {
2410 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002411 }
Richard Henderson23326162013-07-08 14:55:59 -07002412
2413 /* Bound the maximum access by the alignment of the address. */
2414 if (!mr->ops->impl.unaligned) {
2415 unsigned align_size_max = addr & -addr;
2416 if (align_size_max != 0 && align_size_max < access_size_max) {
2417 access_size_max = align_size_max;
2418 }
2419 }
2420
2421 /* Don't attempt accesses larger than the maximum. */
2422 if (l > access_size_max) {
2423 l = access_size_max;
2424 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002425 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002426
2427 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002428}
2429
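/*
 * Worked example, not part of the original file: for an MMIO region that
 * reports .valid.max_access_size = 4 and does not set .impl.unaligned, an
 * 8-byte access at offset 0x1002 is first capped to 4 bytes by the region
 * limit and then to 2 bytes by the address alignment (0x1002 & -0x1002 == 2),
 * so memory_access_size(mr, 8, 0x1002) returns 2 and the caller issues the
 * remaining bytes as further 4- and 2-byte dispatches.
 */
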
Jan Kiszka4840f102015-06-18 18:47:22 +02002430static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002431{
Jan Kiszka4840f102015-06-18 18:47:22 +02002432 bool unlocked = !qemu_mutex_iothread_locked();
2433 bool release_lock = false;
2434
2435 if (unlocked && mr->global_locking) {
2436 qemu_mutex_lock_iothread();
2437 unlocked = false;
2438 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002439 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002440 if (mr->flush_coalesced_mmio) {
2441 if (unlocked) {
2442 qemu_mutex_lock_iothread();
2443 }
2444 qemu_flush_coalesced_mmio_buffer();
2445 if (unlocked) {
2446 qemu_mutex_unlock_iothread();
2447 }
2448 }
2449
2450 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002451}
2452
Peter Maydell5c9eb022015-04-26 16:49:24 +01002453MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2454 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002455{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002456 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002457 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002458 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002459 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002460 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002461 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002462 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002463
Paolo Bonzini41063e12015-03-18 14:21:43 +01002464 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002465 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002466 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002467 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002468
bellard13eb76e2004-01-24 15:23:36 +00002469 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002470 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002471 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002472 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002473 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002474 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002475 switch (l) {
2476 case 8:
2477 /* 64 bit write access */
2478 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002479 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2480 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002481 break;
2482 case 4:
bellard1c213d12005-09-03 10:49:04 +00002483 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002484 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002485 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2486 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002487 break;
2488 case 2:
bellard1c213d12005-09-03 10:49:04 +00002489 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002490 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002491 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2492 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002493 break;
2494 case 1:
bellard1c213d12005-09-03 10:49:04 +00002495 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002496 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002497 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2498 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002499 break;
2500 default:
2501 abort();
bellard13eb76e2004-01-24 15:23:36 +00002502 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002503 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002504 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002505 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002506 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002507 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002508 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002509 }
2510 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002511 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002512 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002513 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002514 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002515 switch (l) {
2516 case 8:
2517 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002518 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2519 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002520 stq_p(buf, val);
2521 break;
2522 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002523 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002524 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2525 attrs);
bellardc27004e2005-01-03 23:35:10 +00002526 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002527 break;
2528 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002529 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002530 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2531 attrs);
bellardc27004e2005-01-03 23:35:10 +00002532 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002533 break;
2534 case 1:
bellard1c213d12005-09-03 10:49:04 +00002535 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002536 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2537 attrs);
bellardc27004e2005-01-03 23:35:10 +00002538 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002539 break;
2540 default:
2541 abort();
bellard13eb76e2004-01-24 15:23:36 +00002542 }
2543 } else {
2544 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002545 ptr = qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002546 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002547 }
2548 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002549
2550 if (release_lock) {
2551 qemu_mutex_unlock_iothread();
2552 release_lock = false;
2553 }
2554
bellard13eb76e2004-01-24 15:23:36 +00002555 len -= l;
2556 buf += l;
2557 addr += l;
2558 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002559 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002560
Peter Maydell3b643492015-04-26 16:49:23 +01002561 return result;
bellard13eb76e2004-01-24 15:23:36 +00002562}
bellard8df1cd02005-01-28 22:37:22 +00002563
Peter Maydell5c9eb022015-04-26 16:49:24 +01002564MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2565 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002566{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002567 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002568}
2569
Peter Maydell5c9eb022015-04-26 16:49:24 +01002570MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2571 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002572{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002573 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002574}
2575
2576
Avi Kivitya8170e52012-10-23 12:30:10 +02002577void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002578 int len, int is_write)
2579{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002580 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2581 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002582}
2583
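/*
 * Illustrative sketch, not part of the original file: a device model doing a
 * small DMA-style copy through an address space and checking the transaction
 * result.  The function name and the fixed 4-byte size are assumptions for
 * illustration.
 */
static bool example_dma_copy_word(AddressSpace *as, hwaddr src, hwaddr dst)
{
    uint8_t bounce[4];
    MemTxResult res;

    /* Read four bytes from the source... */
    res = address_space_read(as, src, MEMTXATTRS_UNSPECIFIED,
                             bounce, sizeof(bounce));
    /* ...and write them back out at the destination. */
    res |= address_space_write(as, dst, MEMTXATTRS_UNSPECIFIED,
                               bounce, sizeof(bounce));

    /* Any failing sub-access makes the whole transfer report an error. */
    return res == MEMTX_OK;
}
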
Alexander Graf582b55a2013-12-11 14:17:44 +01002584enum write_rom_type {
2585 WRITE_DATA,
2586 FLUSH_CACHE,
2587};
2588
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002589static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002590 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002591{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002592 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002593 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002594 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002595 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002596
Paolo Bonzini41063e12015-03-18 14:21:43 +01002597 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002598 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002599 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002600 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002601
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002602 if (!(memory_region_is_ram(mr) ||
2603 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002604 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002605 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002606 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002607 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002608 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002609 switch (type) {
2610 case WRITE_DATA:
2611 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002612 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002613 break;
2614 case FLUSH_CACHE:
2615 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2616 break;
2617 }
bellardd0ecd2a2006-04-23 17:14:48 +00002618 }
2619 len -= l;
2620 buf += l;
2621 addr += l;
2622 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002623 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002624}
2625
Alexander Graf582b55a2013-12-11 14:17:44 +01002626/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002627void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002628 const uint8_t *buf, int len)
2629{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002630 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002631}
2632
2633void cpu_flush_icache_range(hwaddr start, int len)
2634{
2635 /*
2636 * This function should do the same thing as an icache flush that was
2637 * triggered from within the guest. For TCG we are always cache coherent,
2638 * so there is no need to flush anything. For KVM / Xen we need to flush
2639 * the host's instruction cache at least.
2640 */
2641 if (tcg_enabled()) {
2642 return;
2643 }
2644
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002645 cpu_physical_memory_write_rom_internal(&address_space_memory,
2646 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002647}
2648
aliguori6d16c2f2009-01-22 16:59:11 +00002649typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002650 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002651 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002652 hwaddr addr;
2653 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002654 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002655} BounceBuffer;
2656
2657static BounceBuffer bounce;
2658
aliguoriba223c22009-01-22 16:59:16 +00002659typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002660 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002661 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002662} MapClient;
2663
Fam Zheng38e047b2015-03-16 17:03:35 +08002664QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002665static QLIST_HEAD(map_client_list, MapClient) map_client_list
2666 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002667
Fam Zhenge95205e2015-03-16 17:03:37 +08002668static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002669{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002670 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002671 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002672}
2673
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002674static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002675{
2676 MapClient *client;
2677
Blue Swirl72cf2d42009-09-12 07:36:22 +00002678 while (!QLIST_EMPTY(&map_client_list)) {
2679 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002680 qemu_bh_schedule(client->bh);
2681 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002682 }
2683}
2684
Fam Zhenge95205e2015-03-16 17:03:37 +08002685void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002686{
2687 MapClient *client = g_malloc(sizeof(*client));
2688
Fam Zheng38e047b2015-03-16 17:03:35 +08002689 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002690 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002691 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002692 if (!atomic_read(&bounce.in_use)) {
2693 cpu_notify_map_clients_locked();
2694 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002695 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002696}
2697
Fam Zheng38e047b2015-03-16 17:03:35 +08002698void cpu_exec_init_all(void)
2699{
2700 qemu_mutex_init(&ram_list.mutex);
2701 memory_map_init();
2702 io_mem_init();
2703 qemu_mutex_init(&map_client_list_lock);
2704}
2705
Fam Zhenge95205e2015-03-16 17:03:37 +08002706void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002707{
Fam Zhenge95205e2015-03-16 17:03:37 +08002708 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002709
Fam Zhenge95205e2015-03-16 17:03:37 +08002710 qemu_mutex_lock(&map_client_list_lock);
2711 QLIST_FOREACH(client, &map_client_list, link) {
2712 if (client->bh == bh) {
2713 cpu_unregister_map_client_do(client);
2714 break;
2715 }
2716 }
2717 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002718}
2719
2720static void cpu_notify_map_clients(void)
2721{
Fam Zheng38e047b2015-03-16 17:03:35 +08002722 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002723 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002724 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002725}
2726
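/*
 * Illustrative sketch, not part of the original file: queueing a retry when
 * address_space_map() fails because the single bounce buffer is in use.  The
 * example_* names are invented; a real caller would keep the QEMUBH around
 * and re-issue its DMA request from the callback.
 */
static void example_retry_bh(void *opaque)
{
    /* Runs from a bottom half once the bounce buffer has been released;
     * cpu_notify_map_clients() has already dropped us from the list. */
}

static void example_queue_retry(void)
{
    QEMUBH *bh = qemu_bh_new(example_retry_bh, NULL);

    /* Scheduled immediately if the bounce buffer is already free, otherwise
     * on the next address_space_unmap() of the bounce buffer. */
    cpu_register_map_client(bh);
}
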
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002727bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2728{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002729 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002730 hwaddr l, xlat;
2731
Paolo Bonzini41063e12015-03-18 14:21:43 +01002732 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002733 while (len > 0) {
2734 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002735 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2736 if (!memory_access_is_direct(mr, is_write)) {
2737 l = memory_access_size(mr, l, addr);
2738 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002739 return false;
2740 }
2741 }
2742
2743 len -= l;
2744 addr += l;
2745 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002746 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002747 return true;
2748}
2749
aliguori6d16c2f2009-01-22 16:59:11 +00002750/* Map a physical memory region into a host virtual address.
2751 * May map a subset of the requested range, given by and returned in *plen.
2752 * May return NULL if resources needed to perform the mapping are exhausted.
2753 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002754 * Use cpu_register_map_client() to know when retrying the map operation is
2755 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002756 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002757void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002758 hwaddr addr,
2759 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002760 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002761{
Avi Kivitya8170e52012-10-23 12:30:10 +02002762 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002763 hwaddr done = 0;
2764 hwaddr l, xlat, base;
2765 MemoryRegion *mr, *this_mr;
2766 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002767
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002768 if (len == 0) {
2769 return NULL;
2770 }
aliguori6d16c2f2009-01-22 16:59:11 +00002771
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002772 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002773 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002774 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002775
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002776 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002777 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002778 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002779 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002780 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002781 /* Avoid unbounded allocations */
2782 l = MIN(l, TARGET_PAGE_SIZE);
2783 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002784 bounce.addr = addr;
2785 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002786
2787 memory_region_ref(mr);
2788 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002789 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002790 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2791 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002792 }
aliguori6d16c2f2009-01-22 16:59:11 +00002793
Paolo Bonzini41063e12015-03-18 14:21:43 +01002794 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002795 *plen = l;
2796 return bounce.buffer;
2797 }
2798
2799 base = xlat;
2800 raddr = memory_region_get_ram_addr(mr);
2801
2802 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002803 len -= l;
2804 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002805 done += l;
2806 if (len == 0) {
2807 break;
2808 }
2809
2810 l = len;
2811 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2812 if (this_mr != mr || xlat != base + done) {
2813 break;
2814 }
aliguori6d16c2f2009-01-22 16:59:11 +00002815 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002816
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002817 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002818 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002819 *plen = done;
2820 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002821}
2822
Avi Kivityac1970f2012-10-03 16:22:53 +02002823/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002824 * Will also mark the memory as dirty if is_write == 1. access_len gives
2825 * the amount of memory that was actually read or written by the caller.
2826 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002827void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2828 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002829{
2830 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002831 MemoryRegion *mr;
2832 ram_addr_t addr1;
2833
2834 mr = qemu_ram_addr_from_host(buffer, &addr1);
2835 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002836 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002837 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002838 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002839 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002840 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002841 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002842 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002843 return;
2844 }
2845 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002846 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2847 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002848 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002849 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002850 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002851 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002852 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002853 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002854}
bellardd0ecd2a2006-04-23 17:14:48 +00002855
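/*
 * Illustrative sketch, not part of the original file: a full map/modify/unmap
 * cycle against the system address space.  Zero-filling and the function name
 * are assumptions for illustration; note that *plen may come back smaller
 * than requested, so a real caller has to loop or fall back to
 * address_space_rw().
 */
static bool example_zero_guest_region(hwaddr addr, hwaddr size)
{
    hwaddr mapped = size;
    void *host = address_space_map(&address_space_memory, addr, &mapped, true);

    if (!host) {
        return false;            /* bounce buffer busy - retry later */
    }
    memset(host, 0, mapped);     /* may cover less than 'size' bytes */
    /* Passing the written length marks the pages dirty and drops stale TBs. */
    address_space_unmap(&address_space_memory, host, mapped, true, mapped);
    return mapped == size;
}
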
Avi Kivitya8170e52012-10-23 12:30:10 +02002856void *cpu_physical_memory_map(hwaddr addr,
2857 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002858 int is_write)
2859{
2860 return address_space_map(&address_space_memory, addr, plen, is_write);
2861}
2862
Avi Kivitya8170e52012-10-23 12:30:10 +02002863void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2864 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002865{
2866 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2867}
2868
bellard8df1cd02005-01-28 22:37:22 +00002869/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002870static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2871 MemTxAttrs attrs,
2872 MemTxResult *result,
2873 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002874{
bellard8df1cd02005-01-28 22:37:22 +00002875 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002876 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002877 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002878 hwaddr l = 4;
2879 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002880 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002881 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002882
Paolo Bonzini41063e12015-03-18 14:21:43 +01002883 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002884 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002885 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002886 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002887
bellard8df1cd02005-01-28 22:37:22 +00002888 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002889 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002890#if defined(TARGET_WORDS_BIGENDIAN)
2891 if (endian == DEVICE_LITTLE_ENDIAN) {
2892 val = bswap32(val);
2893 }
2894#else
2895 if (endian == DEVICE_BIG_ENDIAN) {
2896 val = bswap32(val);
2897 }
2898#endif
bellard8df1cd02005-01-28 22:37:22 +00002899 } else {
2900 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002901 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002902 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002903 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002904 switch (endian) {
2905 case DEVICE_LITTLE_ENDIAN:
2906 val = ldl_le_p(ptr);
2907 break;
2908 case DEVICE_BIG_ENDIAN:
2909 val = ldl_be_p(ptr);
2910 break;
2911 default:
2912 val = ldl_p(ptr);
2913 break;
2914 }
Peter Maydell50013112015-04-26 16:49:24 +01002915 r = MEMTX_OK;
2916 }
2917 if (result) {
2918 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002919 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002920 if (release_lock) {
2921 qemu_mutex_unlock_iothread();
2922 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002923 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002924 return val;
2925}
2926
Peter Maydell50013112015-04-26 16:49:24 +01002927uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2928 MemTxAttrs attrs, MemTxResult *result)
2929{
2930 return address_space_ldl_internal(as, addr, attrs, result,
2931 DEVICE_NATIVE_ENDIAN);
2932}
2933
2934uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2935 MemTxAttrs attrs, MemTxResult *result)
2936{
2937 return address_space_ldl_internal(as, addr, attrs, result,
2938 DEVICE_LITTLE_ENDIAN);
2939}
2940
2941uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2942 MemTxAttrs attrs, MemTxResult *result)
2943{
2944 return address_space_ldl_internal(as, addr, attrs, result,
2945 DEVICE_BIG_ENDIAN);
2946}
2947
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002948uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002949{
Peter Maydell50013112015-04-26 16:49:24 +01002950 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002951}
2952
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002953uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002954{
Peter Maydell50013112015-04-26 16:49:24 +01002955 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002956}
2957
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002958uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002959{
Peter Maydell50013112015-04-26 16:49:24 +01002960 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002961}
2962
bellard84b7b8e2005-11-28 21:19:04 +00002963/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002964static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2965 MemTxAttrs attrs,
2966 MemTxResult *result,
2967 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002968{
bellard84b7b8e2005-11-28 21:19:04 +00002969 uint8_t *ptr;
2970 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002971 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002972 hwaddr l = 8;
2973 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002974 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002975 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002976
Paolo Bonzini41063e12015-03-18 14:21:43 +01002977 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002978 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002979 false);
2980 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002981 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002982
bellard84b7b8e2005-11-28 21:19:04 +00002983 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002984 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002985#if defined(TARGET_WORDS_BIGENDIAN)
2986 if (endian == DEVICE_LITTLE_ENDIAN) {
2987 val = bswap64(val);
2988 }
2989#else
2990 if (endian == DEVICE_BIG_ENDIAN) {
2991 val = bswap64(val);
2992 }
2993#endif
bellard84b7b8e2005-11-28 21:19:04 +00002994 } else {
2995 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002996 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002997 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002998 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002999 switch (endian) {
3000 case DEVICE_LITTLE_ENDIAN:
3001 val = ldq_le_p(ptr);
3002 break;
3003 case DEVICE_BIG_ENDIAN:
3004 val = ldq_be_p(ptr);
3005 break;
3006 default:
3007 val = ldq_p(ptr);
3008 break;
3009 }
Peter Maydell50013112015-04-26 16:49:24 +01003010 r = MEMTX_OK;
3011 }
3012 if (result) {
3013 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003014 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003015 if (release_lock) {
3016 qemu_mutex_unlock_iothread();
3017 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003018 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003019 return val;
3020}
3021
Peter Maydell50013112015-04-26 16:49:24 +01003022uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3023 MemTxAttrs attrs, MemTxResult *result)
3024{
3025 return address_space_ldq_internal(as, addr, attrs, result,
3026 DEVICE_NATIVE_ENDIAN);
3027}
3028
3029uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3030 MemTxAttrs attrs, MemTxResult *result)
3031{
3032 return address_space_ldq_internal(as, addr, attrs, result,
3033 DEVICE_LITTLE_ENDIAN);
3034}
3035
3036uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3037 MemTxAttrs attrs, MemTxResult *result)
3038{
3039 return address_space_ldq_internal(as, addr, attrs, result,
3040 DEVICE_BIG_ENDIAN);
3041}
3042
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003043uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003044{
Peter Maydell50013112015-04-26 16:49:24 +01003045 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003046}
3047
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003048uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003049{
Peter Maydell50013112015-04-26 16:49:24 +01003050 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003051}
3052
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003053uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003054{
Peter Maydell50013112015-04-26 16:49:24 +01003055 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003056}
3057
bellardaab33092005-10-30 20:48:42 +00003058/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003059uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3060 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003061{
3062 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003063 MemTxResult r;
3064
3065 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3066 if (result) {
3067 *result = r;
3068 }
bellardaab33092005-10-30 20:48:42 +00003069 return val;
3070}
3071
Peter Maydell50013112015-04-26 16:49:24 +01003072uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3073{
3074 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3075}
3076
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003077/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003078static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3079 hwaddr addr,
3080 MemTxAttrs attrs,
3081 MemTxResult *result,
3082 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003083{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003084 uint8_t *ptr;
3085 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003086 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003087 hwaddr l = 2;
3088 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003089 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003090 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003091
Paolo Bonzini41063e12015-03-18 14:21:43 +01003092 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003093 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003094 false);
3095 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003096 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003097
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003098 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003099 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003100#if defined(TARGET_WORDS_BIGENDIAN)
3101 if (endian == DEVICE_LITTLE_ENDIAN) {
3102 val = bswap16(val);
3103 }
3104#else
3105 if (endian == DEVICE_BIG_ENDIAN) {
3106 val = bswap16(val);
3107 }
3108#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003109 } else {
3110 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003111 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003112 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003113 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003114 switch (endian) {
3115 case DEVICE_LITTLE_ENDIAN:
3116 val = lduw_le_p(ptr);
3117 break;
3118 case DEVICE_BIG_ENDIAN:
3119 val = lduw_be_p(ptr);
3120 break;
3121 default:
3122 val = lduw_p(ptr);
3123 break;
3124 }
Peter Maydell50013112015-04-26 16:49:24 +01003125 r = MEMTX_OK;
3126 }
3127 if (result) {
3128 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003129 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003130 if (release_lock) {
3131 qemu_mutex_unlock_iothread();
3132 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003133 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003134 return val;
bellardaab33092005-10-30 20:48:42 +00003135}
3136
Peter Maydell50013112015-04-26 16:49:24 +01003137uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3138 MemTxAttrs attrs, MemTxResult *result)
3139{
3140 return address_space_lduw_internal(as, addr, attrs, result,
3141 DEVICE_NATIVE_ENDIAN);
3142}
3143
3144uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3145 MemTxAttrs attrs, MemTxResult *result)
3146{
3147 return address_space_lduw_internal(as, addr, attrs, result,
3148 DEVICE_LITTLE_ENDIAN);
3149}
3150
3151uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3152 MemTxAttrs attrs, MemTxResult *result)
3153{
3154 return address_space_lduw_internal(as, addr, attrs, result,
3155 DEVICE_BIG_ENDIAN);
3156}
3157
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003158uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003159{
Peter Maydell50013112015-04-26 16:49:24 +01003160 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003161}
3162
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003163uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003164{
Peter Maydell50013112015-04-26 16:49:24 +01003165 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003166}
3167
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003168uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003169{
Peter Maydell50013112015-04-26 16:49:24 +01003170 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003171}
3172
bellard8df1cd02005-01-28 22:37:22 +00003173/* warning: addr must be aligned. The ram page is not marked as dirty
3174 and the code inside is not invalidated. It is useful if the dirty
3175 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003176void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3177 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003178{
bellard8df1cd02005-01-28 22:37:22 +00003179 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003180 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003181 hwaddr l = 4;
3182 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003183 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003184 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003185 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003186
Paolo Bonzini41063e12015-03-18 14:21:43 +01003187 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003188 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003189 true);
3190 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003191 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003192
Peter Maydell50013112015-04-26 16:49:24 +01003193 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003194 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003195 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003196 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003197 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003198
Paolo Bonzini845b6212015-03-23 11:45:53 +01003199 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3200 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003201 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003202 r = MEMTX_OK;
3203 }
3204 if (result) {
3205 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003206 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003207 if (release_lock) {
3208 qemu_mutex_unlock_iothread();
3209 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003210 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003211}
3212
Peter Maydell50013112015-04-26 16:49:24 +01003213void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3214{
3215 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3216}
3217
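/*
 * Illustrative sketch, not part of the original file: a target page-table
 * walker setting a dirty bit in a guest PTE.  The 0x40 bit position is a
 * made-up PTE layout; the point is that the _notdirty store updates guest
 * memory without flagging the PTE page itself as modified.
 */
static void example_set_pte_dirty(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    if (!(pte & 0x40)) {
        pte |= 0x40;
        /* Update the PTE without marking its RAM page dirty. */
        stl_phys_notdirty(as, pte_addr, pte);
    }
}
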
bellard8df1cd02005-01-28 22:37:22 +00003218/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003219static inline void address_space_stl_internal(AddressSpace *as,
3220 hwaddr addr, uint32_t val,
3221 MemTxAttrs attrs,
3222 MemTxResult *result,
3223 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003224{
bellard8df1cd02005-01-28 22:37:22 +00003225 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003226 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003227 hwaddr l = 4;
3228 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003229 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003230 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003231
Paolo Bonzini41063e12015-03-18 14:21:43 +01003232 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003233 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003234 true);
3235 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003236 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003237
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003238#if defined(TARGET_WORDS_BIGENDIAN)
3239 if (endian == DEVICE_LITTLE_ENDIAN) {
3240 val = bswap32(val);
3241 }
3242#else
3243 if (endian == DEVICE_BIG_ENDIAN) {
3244 val = bswap32(val);
3245 }
3246#endif
Peter Maydell50013112015-04-26 16:49:24 +01003247 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003248 } else {
bellard8df1cd02005-01-28 22:37:22 +00003249 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003250 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003251 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003252 switch (endian) {
3253 case DEVICE_LITTLE_ENDIAN:
3254 stl_le_p(ptr, val);
3255 break;
3256 case DEVICE_BIG_ENDIAN:
3257 stl_be_p(ptr, val);
3258 break;
3259 default:
3260 stl_p(ptr, val);
3261 break;
3262 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003263 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003264 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003265 }
Peter Maydell50013112015-04-26 16:49:24 +01003266 if (result) {
3267 *result = r;
3268 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003269 if (release_lock) {
3270 qemu_mutex_unlock_iothread();
3271 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003272 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003273}
3274
3275void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3276 MemTxAttrs attrs, MemTxResult *result)
3277{
3278 address_space_stl_internal(as, addr, val, attrs, result,
3279 DEVICE_NATIVE_ENDIAN);
3280}
3281
3282void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3283 MemTxAttrs attrs, MemTxResult *result)
3284{
3285 address_space_stl_internal(as, addr, val, attrs, result,
3286 DEVICE_LITTLE_ENDIAN);
3287}
3288
3289void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3290 MemTxAttrs attrs, MemTxResult *result)
3291{
3292 address_space_stl_internal(as, addr, val, attrs, result,
3293 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003294}
3295
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003296void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003297{
Peter Maydell50013112015-04-26 16:49:24 +01003298 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003299}
3300
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003301void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003302{
Peter Maydell50013112015-04-26 16:49:24 +01003303 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003304}
3305
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003306void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003307{
Peter Maydell50013112015-04-26 16:49:24 +01003308 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003309}
3310
bellardaab33092005-10-30 20:48:42 +00003311/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003312void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3313 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003314{
3315 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003316 MemTxResult r;
3317
3318 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3319 if (result) {
3320 *result = r;
3321 }
3322}
3323
3324void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3325{
3326 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003327}
3328
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003329/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003330static inline void address_space_stw_internal(AddressSpace *as,
3331 hwaddr addr, uint32_t val,
3332 MemTxAttrs attrs,
3333 MemTxResult *result,
3334 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003335{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003336 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003337 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003338 hwaddr l = 2;
3339 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003340 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003341 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003342
Paolo Bonzini41063e12015-03-18 14:21:43 +01003343 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003344 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003345 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003346 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003347
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003348#if defined(TARGET_WORDS_BIGENDIAN)
3349 if (endian == DEVICE_LITTLE_ENDIAN) {
3350 val = bswap16(val);
3351 }
3352#else
3353 if (endian == DEVICE_BIG_ENDIAN) {
3354 val = bswap16(val);
3355 }
3356#endif
Peter Maydell50013112015-04-26 16:49:24 +01003357 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003358 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003359 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003360 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003361 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003362 switch (endian) {
3363 case DEVICE_LITTLE_ENDIAN:
3364 stw_le_p(ptr, val);
3365 break;
3366 case DEVICE_BIG_ENDIAN:
3367 stw_be_p(ptr, val);
3368 break;
3369 default:
3370 stw_p(ptr, val);
3371 break;
3372 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003373 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003374 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003375 }
Peter Maydell50013112015-04-26 16:49:24 +01003376 if (result) {
3377 *result = r;
3378 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003379 if (release_lock) {
3380 qemu_mutex_unlock_iothread();
3381 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003382 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003383}
3384
3385void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3386 MemTxAttrs attrs, MemTxResult *result)
3387{
3388 address_space_stw_internal(as, addr, val, attrs, result,
3389 DEVICE_NATIVE_ENDIAN);
3390}
3391
3392void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3393 MemTxAttrs attrs, MemTxResult *result)
3394{
3395 address_space_stw_internal(as, addr, val, attrs, result,
3396 DEVICE_LITTLE_ENDIAN);
3397}
3398
3399void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3400 MemTxAttrs attrs, MemTxResult *result)
3401{
3402 address_space_stw_internal(as, addr, val, attrs, result,
3403 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003404}
3405
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003406void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003407{
Peter Maydell50013112015-04-26 16:49:24 +01003408 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003409}
3410
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003411void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003412{
Peter Maydell50013112015-04-26 16:49:24 +01003413 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003414}
3415
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003416void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003417{
Peter Maydell50013112015-04-26 16:49:24 +01003418 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003419}
3420
bellardaab33092005-10-30 20:48:42 +00003421/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003422void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3423 MemTxAttrs attrs, MemTxResult *result)
3424{
3425 MemTxResult r;
3426 val = tswap64(val);
3427 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3428 if (result) {
3429 *result = r;
3430 }
3431}
3432
3433void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3434 MemTxAttrs attrs, MemTxResult *result)
3435{
3436 MemTxResult r;
3437 val = cpu_to_le64(val);
3438 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3439 if (result) {
3440 *result = r;
3441 }
3442}
3443void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3444 MemTxAttrs attrs, MemTxResult *result)
3445{
3446 MemTxResult r;
3447 val = cpu_to_be64(val);
3448 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3449 if (result) {
3450 *result = r;
3451 }
3452}
3453
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003454void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003455{
Peter Maydell50013112015-04-26 16:49:24 +01003456 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003457}
3458
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003459void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003460{
Peter Maydell50013112015-04-26 16:49:24 +01003461 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003462}
3463
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003464void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003465{
Peter Maydell50013112015-04-26 16:49:24 +01003466 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003467}
3468
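/* Illustrative sketch, not part of exec.c: the *_phys convenience wrappers
 * above drop the attrs/result arguments, so a fire-and-forget store to
 * guest-physical memory is a single call. "example_store_u64" is a
 * hypothetical helper.
 */
static void example_store_u64(hwaddr addr, uint64_t val)
{
    /* 64-bit little-endian store into the system memory address space. */
    stq_le_phys(&address_space_memory, addr, val);
}
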
aliguori5e2972f2009-03-28 17:51:36 +00003469/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003470int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003471 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003472{
3473 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003474 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003475 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003476
3477 while (len > 0) {
3478 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003479 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003480 /* if no physical page mapped, return an error */
3481 if (phys_addr == -1)
3482 return -1;
3483 l = (page + TARGET_PAGE_SIZE) - addr;
3484 if (l > len)
3485 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003486 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003487 if (is_write) {
3488 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3489 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003490 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3491 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003492 }
bellard13eb76e2004-01-24 15:23:36 +00003493 len -= l;
3494 buf += l;
3495 addr += l;
3496 }
3497 return 0;
3498}
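
/* Illustrative sketch, not part of exec.c: how a debugger front end (e.g. the
 * gdbstub) can read guest virtual memory. cpu_memory_rw_debug() translates
 * page by page, so the buffer may cross page boundaries. "debug_read_u32" is
 * a hypothetical helper; it returns 0 on success and -1 if any page in the
 * range is unmapped.
 */
static int debug_read_u32(CPUState *cpu, target_ulong vaddr, uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out, sizeof(*out), 0);
}
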
Paul Brooka68fe892010-03-01 00:08:59 +00003499#endif
bellard13eb76e2004-01-24 15:23:36 +00003500
Blue Swirl8e4a4242013-01-06 18:30:17 +00003501/*
3502 * A helper function for the _utterly broken_ virtio device model to find out if
3503 * it's running on a big endian machine. Don't do this at home kids!
3504 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003505bool target_words_bigendian(void);
3506bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003507{
3508#if defined(TARGET_WORDS_BIGENDIAN)
3509 return true;
3510#else
3511 return false;
3512#endif
3513}
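
/* Illustrative sketch, not part of exec.c: a hypothetical caller that uses the
 * helper above to convert a guest-endian value to host byte order at run time.
 * Assumes bswap16() from qemu/bswap.h (pulled in via qemu-common.h) and
 * QEMU's HOST_WORDS_BIGENDIAN host-endianness config macro.
 */
static uint16_t example_guest_to_host_u16(uint16_t v)
{
#ifdef HOST_WORDS_BIGENDIAN
    bool host_big = true;
#else
    bool host_big = false;
#endif
    return target_words_bigendian() == host_big ? v : bswap16(v);
}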
3514
Wen Congyang76f35532012-05-07 12:04:18 +08003515#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003516bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003517{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003518 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003519 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003520 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003521
Paolo Bonzini41063e12015-03-18 14:21:43 +01003522 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003523 mr = address_space_translate(&address_space_memory,
3524 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003525
Paolo Bonzini41063e12015-03-18 14:21:43 +01003526 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3527 rcu_read_unlock();
3528 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003529}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003530
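/* Illustrative sketch, not part of exec.c: callers such as memory dump code
 * can use the predicate above to skip MMIO ranges, where a read could trigger
 * device side effects. "example_page_is_dumpable" is a hypothetical helper.
 */
static bool example_page_is_dumpable(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr);
}
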
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003531int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003532{
3533 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003534 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003535
Mike Day0dc3f442013-09-05 14:41:35 -04003536 rcu_read_lock();
3537 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003538 ret = func(block->idstr, block->host, block->offset,
3539 block->used_length, opaque);
3540 if (ret) {
3541 break;
3542 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003543 }
Mike Day0dc3f442013-09-05 14:41:35 -04003544 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003545 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003546}
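
/* Illustrative sketch, not part of exec.c: a callback matching the
 * RAMBlockIterFunc contract expected by qemu_ram_foreach_block() above.
 * Returning non-zero stops the walk early; this hypothetical callback just
 * accumulates the total size of all RAM blocks into *opaque, e.g.
 * qemu_ram_foreach_block(example_sum_ram_sizes, &total).
 */
static int example_sum_ram_sizes(const char *block_name, void *host_addr,
                                 ram_addr_t offset, ram_addr_t length,
                                 void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;   /* keep iterating */
}
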
Peter Maydellec3f8c92013-06-27 20:53:38 +01003547#endif