/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file.
 */
#define RAM_FILE (1 << 3)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

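/* Make sure the node array of @map has room for @nodes more entries,
 * growing it geometrically when needed.
 */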
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

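/* Allocate a fresh node from @map and initialise all of its P_L2_SIZE
 * entries: leaves point at the unassigned section, inner nodes at
 * PHYS_MAP_NODE_NIL.
 */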
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

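/* Fill one level of the page table: map *nb pages starting at *index to
 * section @leaf, allocating intermediate nodes on demand and recursing
 * into the next level for ranges that do not cover a whole step.
 */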
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

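/* Point the dispatch map of @d at section @leaf for @nb pages starting at
 * page @index.
 */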
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

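/* Walk the multi-level map and return the MemoryRegionSection covering
 * @addr, or the unassigned section if nothing is mapped there.
 */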
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

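/* Return true if an access to @mr can be done directly on host memory
 * (writable RAM for writes, RAM or ROMD regions for reads) instead of
 * going through MMIO dispatch.
 */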
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment. */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

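/* Allocate the lowest unused cpu_index, tracked in cpu_index_map; reports
 * an error if more than MAX_CPUMASK_BITS CPUs are created.
 */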
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock. This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here. The block was already published
     * when it was placed into the list. Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

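/* Called after dirty bits have been cleared: make every vCPU's TLB trap
 * writes to [start, start + length) again. The whole range must lie
 * within a single RAMBlock.
 */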
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries. Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

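/* Register a section that does not cover a whole target page: route its
 * range through a subpage_t so that several sections can share one page
 * in the dispatch map.
 */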
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


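/* Register a page-aligned section spanning whole target pages directly in
 * the dispatch map, without a subpage indirection.
 */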
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

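/* Add @section to the address space's next dispatch table, splitting it
 * into page-aligned middle parts (register_multipage) and unaligned head
 * and tail fragments (register_subpage).
 */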
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

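/*
 * Worked example for mem_add() above (illustrative numbers only, assuming
 * 4 KiB target pages): a section covering [0x1800, 0x5800) is split into
 *
 *   [0x1800, 0x2000)  partial head page   -> register_subpage()
 *   [0x2000, 0x5000)  three whole pages   -> register_multipage()
 *   [0x5000, 0x5800)  partial tail page   -> register_subpage()
 *
 * so only the unaligned head and tail pay the cost of subpage dispatch.
 */
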
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    return NULL;
}
#endif

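/*
 * Sketch of how the hugetlbfs path above is typically reached (not part of
 * the original file, host paths are only examples): starting QEMU with
 * something like
 *
 *     qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages
 *
 * routes guest RAM allocation through file_ram_alloc(), which backs the
 * RAMBlock with a deleted temporary file on the hugetlbfs mount instead of
 * anonymous memory.
 */
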
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

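/*
 * Illustrative example for find_ram_offset() (made-up offsets): with blocks
 * occupying [0x0, 0x40000000) and [0x80000000, 0xc0000000), a request for
 * 0x20000000 bytes considers the gaps [0x40000000, 0x80000000) and
 * [0xc0000000, RAM_ADDR_MAX); the smallest gap that still fits wins, so the
 * new block is placed at offset 0x40000000.
 */
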
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

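/*
 * Minimal sketch of a resize callback as expected by qemu_ram_resize()
 * (hypothetical device code, not from this file):
 *
 *     static void my_blob_resized(const char *id, uint64_t length,
 *                                 void *host)
 *     {
 *         // Re-derive any device state that caches the blob length.
 *     }
 *
 * The callback is stored in RAMBlock::resized by
 * qemu_ram_alloc_resizeable() and invoked only when the used length
 * actually changes.
 */
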
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
        (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

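/*
 * Note on the ordering above (a reading of the code, not stated elsewhere in
 * this file): keeping ram_list.blocks sorted from largest to smallest means
 * the linear searches in qemu_get_ram_block() and qemu_ram_addr_from_host()
 * try the big guest-RAM blocks first, which is where most lookups land in
 * practice.
 */
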
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->flags |= RAM_FILE;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

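/*
 * Typical call path for the allocators above (simplified sketch): board code
 * does not usually call qemu_ram_alloc() directly but goes through the
 * memory API, e.g.
 *
 *     memory_region_init_ram(mr, owner, "pc.ram", ram_size, &error_abort);
 *
 * which ends up in qemu_ram_alloc(); memory_region_init_ram_ptr() and
 * memory_region_init_resizeable_ram() map onto the _from_ptr and
 * _resizeable variants respectively.
 */
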
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        if (block->flags & RAM_FILE) {
            qemu_ram_munmap(block->host, block->max_length);
        } else {
            munmap(block->host, block->max_length);
        }
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

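/*
 * The two free paths above differ only in how the block is reclaimed:
 * qemu_ram_free_from_ptr() frees just the RAMBlock structure via
 * g_free_rcu(), while qemu_ram_free() defers to reclaim_ramblock(), which
 * also unmaps or frees the host memory once all RCU readers that may still
 * see the block have finished.
 */
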
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}

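/*
 * Usage sketch (assuming the caller holds no other protection): wrap the
 * lookup and the access in one RCU critical section so the block cannot be
 * reclaimed in between:
 *
 *     rcu_read_lock();
 *     void *p = qemu_get_ram_ptr(addr);
 *     memcpy(buf, p, len);
 *     rcu_read_unlock();
 *
 * Alternatively, hold a reference on the owning MemoryRegion as described
 * in the comment above.
 */
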
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    void *ptr;
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;
        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length)
                    *size = block->max_length - addr + block->offset;
                ptr = ramblock_ptr(block, addr - block->offset);
                rcu_read_unlock();
                return ptr;
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;
    MemoryRegion *mr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        mr = qemu_get_ram_block(*ram_addr)->mr;
        rcu_read_unlock();
        return mr;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    mr = block->mr;
    rcu_read_unlock();
    return mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

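/*
 * How the notdirty path fits together (summary of the code above): TCG maps
 * pages containing translated code to io_mem_notdirty, so every guest write
 * to such a page lands in notdirty_mem_write().  That invalidates any TBs on
 * the page, performs the store, marks the range dirty for VGA and migration,
 * and, once no translated code remains, flips the TLB entry back to a plain
 * RAM mapping so later writes skip this slow path.
 */
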
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

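/*
 * Watchpoint dispatch in one sentence (summary): TLB entries covering a
 * watched page point at io_mem_watch, so every access funnels through
 * watch_mem_read()/watch_mem_write(), which call check_watchpoint() first
 * and then forward the access to address_space_memory as if no watchpoint
 * existed.
 */
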
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

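/*
 * Example of when the subpage machinery is needed (illustrative, assuming
 * 4 KiB target pages): if a 0x100-byte device region is mapped at 0x1000
 * and RAM covers the rest of that page, the page cannot be described by a
 * single MemoryRegionSection.  The page is then backed by a subpage_t whose
 * sub_section[] array resolves each offset within the page to the right
 * section, and subpage_read()/subpage_write() simply bounce the access back
 * into the address space at the full (base + addr) location.
 */
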
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

Avi Kivity1d711482012-10-02 18:54:45 +02002266static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002267{
Peter Maydell32857f42015-10-01 15:29:50 +01002268 CPUAddressSpace *cpuas;
2269 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002270
2271 /* since each CPU stores ram addresses in its TLB cache, we must
2272 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002273 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2274 cpu_reloading_memory_map();
2275 /* The CPU and TLB are protected by the iothread lock.
2276 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2277 * may have split the RCU critical section.
2278 */
2279 d = atomic_rcu_read(&cpuas->as->dispatch);
2280 cpuas->memory_dispatch = d;
2281 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002282}
2283
Avi Kivityac1970f2012-10-03 16:22:53 +02002284void address_space_init_dispatch(AddressSpace *as)
2285{
Paolo Bonzini00752702013-05-29 12:13:54 +02002286 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002287 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002288 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002289 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002290 .region_add = mem_add,
2291 .region_nop = mem_add,
2292 .priority = 0,
2293 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002294 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002295}
2296
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002297void address_space_unregister(AddressSpace *as)
2298{
2299 memory_listener_unregister(&as->dispatch_listener);
2300}
2301
Avi Kivity83f3c252012-10-07 12:59:55 +02002302void address_space_destroy_dispatch(AddressSpace *as)
2303{
2304 AddressSpaceDispatch *d = as->dispatch;
2305
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002306 atomic_rcu_set(&as->dispatch, NULL);
2307 if (d) {
2308 call_rcu(d, address_space_dispatch_free, rcu);
2309 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002310}
2311
Avi Kivity62152b82011-07-26 14:26:14 +03002312static void memory_map_init(void)
2313{
Anthony Liguori7267c092011-08-20 22:09:37 -05002314 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002315
Paolo Bonzini57271d62013-11-07 17:14:37 +01002316 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002317 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002318
Anthony Liguori7267c092011-08-20 22:09:37 -05002319 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002320 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2321 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002322 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002323}
2324
2325MemoryRegion *get_system_memory(void)
2326{
2327 return system_memory;
2328}
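
/*
 * Illustrative sketch, not part of the original file: board code normally
 * obtains the root MemoryRegion returned above and maps its RAM into it.
 * "ram" and "ram_size" are placeholders for machine-specific values.
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_allocate_system_memory(ram, NULL, "ram", ram_size);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */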
2329
Avi Kivity309cb472011-08-08 16:09:03 +03002330MemoryRegion *get_system_io(void)
2331{
2332 return system_io;
2333}
2334
pbrooke2eef172008-06-08 01:09:01 +00002335#endif /* !defined(CONFIG_USER_ONLY) */
2336
bellard13eb76e2004-01-24 15:23:36 +00002337/* physical memory access (slow version, mainly for debug) */
2338#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002339int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002340 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002341{
2342 int l, flags;
2343 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002344 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002345
2346 while (len > 0) {
2347 page = addr & TARGET_PAGE_MASK;
2348 l = (page + TARGET_PAGE_SIZE) - addr;
2349 if (l > len)
2350 l = len;
2351 flags = page_get_flags(page);
2352 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002353 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002354 if (is_write) {
2355 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002356 return -1;
bellard579a97f2007-11-11 14:26:47 +00002357 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002358 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002359 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002360 memcpy(p, buf, l);
2361 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002362 } else {
2363 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002364 return -1;
bellard579a97f2007-11-11 14:26:47 +00002365 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002366 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002367 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002368 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002369 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002370 }
2371 len -= l;
2372 buf += l;
2373 addr += l;
2374 }
Paul Brooka68fe892010-03-01 00:08:59 +00002375 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002376}
bellard8df1cd02005-01-28 22:37:22 +00002377
bellard13eb76e2004-01-24 15:23:36 +00002378#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002379
Paolo Bonzini845b6212015-03-23 11:45:53 +01002380static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002381 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002382{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002383 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2384 /* No early return if dirty_log_mask is or becomes 0, because
2385 * cpu_physical_memory_set_dirty_range will still call
2386 * xen_modified_memory.
2387 */
2388 if (dirty_log_mask) {
2389 dirty_log_mask =
2390 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002391 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002392 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2393 tb_invalidate_phys_range(addr, addr + length);
2394 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2395 }
2396 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002397}
2398
Richard Henderson23326162013-07-08 14:55:59 -07002399static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002400{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002401 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002402
2403 /* Regions are assumed to support 1-4 byte accesses unless
2404 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002405 if (access_size_max == 0) {
2406 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002407 }
Richard Henderson23326162013-07-08 14:55:59 -07002408
2409 /* Bound the maximum access by the alignment of the address. */
2410 if (!mr->ops->impl.unaligned) {
2411 unsigned align_size_max = addr & -addr;
2412 if (align_size_max != 0 && align_size_max < access_size_max) {
2413 access_size_max = align_size_max;
2414 }
2415 }
2416
2417 /* Don't attempt accesses larger than the maximum. */
2418 if (l > access_size_max) {
2419 l = access_size_max;
2420 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002421 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002422
2423 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002424}
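
/*
 * Worked example for the clamping above: for a region that does not set
 * impl.unaligned and declares valid.max_access_size == 4, an 8-byte access
 * at an address whose lowest set bit is 2 (addr & -addr == 2) is first
 * limited to 4 by the region and then to 2 by the alignment; pow2floor()
 * leaves it at 2, so the caller's loop issues the access in 2-byte pieces.
 */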
2425
Jan Kiszka4840f102015-06-18 18:47:22 +02002426static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002427{
Jan Kiszka4840f102015-06-18 18:47:22 +02002428 bool unlocked = !qemu_mutex_iothread_locked();
2429 bool release_lock = false;
2430
2431 if (unlocked && mr->global_locking) {
2432 qemu_mutex_lock_iothread();
2433 unlocked = false;
2434 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002435 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002436 if (mr->flush_coalesced_mmio) {
2437 if (unlocked) {
2438 qemu_mutex_lock_iothread();
2439 }
2440 qemu_flush_coalesced_mmio_buffer();
2441 if (unlocked) {
2442 qemu_mutex_unlock_iothread();
2443 }
2444 }
2445
2446 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002447}
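
/*
 * Callers follow the pattern used by the accessors below (sketch):
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ... dispatch the MMIO read or write ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 *
 * so the iothread lock is only taken for regions that still rely on global
 * locking and is dropped again once the access has been dispatched.
 */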
2448
Peter Maydell5c9eb022015-04-26 16:49:24 +01002449MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2450 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002451{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002452 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002453 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002454 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002455 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002456 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002457 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002458 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002459
Paolo Bonzini41063e12015-03-18 14:21:43 +01002460 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002461 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002462 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002463 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002464
bellard13eb76e2004-01-24 15:23:36 +00002465 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002466 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002467 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002468 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002469 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002470 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002471 switch (l) {
2472 case 8:
2473 /* 64 bit write access */
2474 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002475 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2476 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002477 break;
2478 case 4:
bellard1c213d12005-09-03 10:49:04 +00002479 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002480 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002481 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2482 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002483 break;
2484 case 2:
bellard1c213d12005-09-03 10:49:04 +00002485 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002486 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002487 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2488 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002489 break;
2490 case 1:
bellard1c213d12005-09-03 10:49:04 +00002491 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002492 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002493 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2494 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002495 break;
2496 default:
2497 abort();
bellard13eb76e2004-01-24 15:23:36 +00002498 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002499 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002500 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002501 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002502 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002503 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002504 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002505 }
2506 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002507 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002508 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002509 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002510 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002511 switch (l) {
2512 case 8:
2513 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002514 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2515 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002516 stq_p(buf, val);
2517 break;
2518 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002519 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002520 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2521 attrs);
bellardc27004e2005-01-03 23:35:10 +00002522 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002523 break;
2524 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002525 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002526 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2527 attrs);
bellardc27004e2005-01-03 23:35:10 +00002528 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002529 break;
2530 case 1:
bellard1c213d12005-09-03 10:49:04 +00002531 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002532 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2533 attrs);
bellardc27004e2005-01-03 23:35:10 +00002534 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002535 break;
2536 default:
2537 abort();
bellard13eb76e2004-01-24 15:23:36 +00002538 }
2539 } else {
2540 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002541 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002542 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002543 }
2544 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002545
2546 if (release_lock) {
2547 qemu_mutex_unlock_iothread();
2548 release_lock = false;
2549 }
2550
bellard13eb76e2004-01-24 15:23:36 +00002551 len -= l;
2552 buf += l;
2553 addr += l;
2554 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002555 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002556
Peter Maydell3b643492015-04-26 16:49:23 +01002557 return result;
bellard13eb76e2004-01-24 15:23:36 +00002558}
bellard8df1cd02005-01-28 22:37:22 +00002559
Peter Maydell5c9eb022015-04-26 16:49:24 +01002560MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2561 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002562{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002563 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002564}
2565
Peter Maydell5c9eb022015-04-26 16:49:24 +01002566MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2567 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002568{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002569 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002570}
2571
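/*
 * Usage sketch (hypothetical device code, not part of this file): a DMA
 * write of a small command block into guest memory via the wrapper above.
 * "desc_addr" and "cmd" are placeholders.
 *
 *     uint8_t cmd[4] = { 0x01, 0x00, 0x00, 0x00 };
 *     MemTxResult r = address_space_write(&address_space_memory, desc_addr,
 *                                         MEMTXATTRS_UNSPECIFIED, cmd,
 *                                         sizeof(cmd));
 *     if (r != MEMTX_OK) {
 *         ... report a DMA failure to the guest ...
 *     }
 */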
2572
Avi Kivitya8170e52012-10-23 12:30:10 +02002573void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002574 int len, int is_write)
2575{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002576 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2577 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002578}
2579
Alexander Graf582b55a2013-12-11 14:17:44 +01002580enum write_rom_type {
2581 WRITE_DATA,
2582 FLUSH_CACHE,
2583};
2584
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002585static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002586 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002587{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002588 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002589 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002590 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002591 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002592
Paolo Bonzini41063e12015-03-18 14:21:43 +01002593 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002594 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002595 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002596 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002597
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002598 if (!(memory_region_is_ram(mr) ||
2599 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002600 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002601 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002602 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002603 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002604 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002605 switch (type) {
2606 case WRITE_DATA:
2607 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002608 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002609 break;
2610 case FLUSH_CACHE:
2611 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2612 break;
2613 }
bellardd0ecd2a2006-04-23 17:14:48 +00002614 }
2615 len -= l;
2616 buf += l;
2617 addr += l;
2618 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002619 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002620}
2621
Alexander Graf582b55a2013-12-11 14:17:44 +01002622/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002623void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002624 const uint8_t *buf, int len)
2625{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002626 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002627}
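
/*
 * Sketch of a firmware loader using the helper above (hypothetical board
 * code; ROM_BASE, blob and blob_size are placeholders). Unlike a plain
 * address_space_write(), this also reaches memory that is exposed to the
 * guest as ROM:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, ROM_BASE,
 *                                   blob, blob_size);
 */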
2628
2629void cpu_flush_icache_range(hwaddr start, int len)
2630{
2631 /*
2632 * This function should do the same thing as an icache flush that was
2633 * triggered from within the guest. For TCG we are always cache coherent,
2634 * so there is no need to flush anything. For KVM / Xen we need to flush
2635 * the host's instruction cache at least.
2636 */
2637 if (tcg_enabled()) {
2638 return;
2639 }
2640
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002641 cpu_physical_memory_write_rom_internal(&address_space_memory,
2642 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002643}
2644
aliguori6d16c2f2009-01-22 16:59:11 +00002645typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002646 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002647 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002648 hwaddr addr;
2649 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002650 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002651} BounceBuffer;
2652
2653static BounceBuffer bounce;
2654
aliguoriba223c22009-01-22 16:59:16 +00002655typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002656 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002657 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002658} MapClient;
2659
Fam Zheng38e047b2015-03-16 17:03:35 +08002660QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002661static QLIST_HEAD(map_client_list, MapClient) map_client_list
2662 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002663
Fam Zhenge95205e2015-03-16 17:03:37 +08002664static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002665{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002666 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002667 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002668}
2669
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002670static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002671{
2672 MapClient *client;
2673
Blue Swirl72cf2d42009-09-12 07:36:22 +00002674 while (!QLIST_EMPTY(&map_client_list)) {
2675 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002676 qemu_bh_schedule(client->bh);
2677 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002678 }
2679}
2680
Fam Zhenge95205e2015-03-16 17:03:37 +08002681void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002682{
2683 MapClient *client = g_malloc(sizeof(*client));
2684
Fam Zheng38e047b2015-03-16 17:03:35 +08002685 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002686 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002687 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002688 if (!atomic_read(&bounce.in_use)) {
2689 cpu_notify_map_clients_locked();
2690 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002691 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002692}
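
/*
 * Usage sketch (hypothetical device code): when address_space_map() below
 * fails because the single bounce buffer is in use, queue a bottom half and
 * retry the mapping from there once the buffer frees up.
 * "mydev_retry_map" and "s" are placeholders.
 *
 *     s->bh = qemu_bh_new(mydev_retry_map, s);
 *     cpu_register_map_client(s->bh);
 *
 * The bottom half is scheduled (and the registration dropped) the next time
 * the bounce buffer becomes free.
 */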
2693
Fam Zheng38e047b2015-03-16 17:03:35 +08002694void cpu_exec_init_all(void)
2695{
2696 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002697 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002698 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002699 qemu_mutex_init(&map_client_list_lock);
2700}
2701
Fam Zhenge95205e2015-03-16 17:03:37 +08002702void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002703{
Fam Zhenge95205e2015-03-16 17:03:37 +08002704 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002705
Fam Zhenge95205e2015-03-16 17:03:37 +08002706 qemu_mutex_lock(&map_client_list_lock);
2707 QLIST_FOREACH(client, &map_client_list, link) {
2708 if (client->bh == bh) {
2709 cpu_unregister_map_client_do(client);
2710 break;
2711 }
2712 }
2713 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002714}
2715
2716static void cpu_notify_map_clients(void)
2717{
Fam Zheng38e047b2015-03-16 17:03:35 +08002718 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002719 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002720 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002721}
2722
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002723bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2724{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002725 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002726 hwaddr l, xlat;
2727
Paolo Bonzini41063e12015-03-18 14:21:43 +01002728 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002729 while (len > 0) {
2730 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002731 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2732 if (!memory_access_is_direct(mr, is_write)) {
2733 l = memory_access_size(mr, l, addr);
2734 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002735 return false;
2736 }
2737 }
2738
2739 len -= l;
2740 addr += l;
2741 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002742 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002743 return true;
2744}
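
/*
 * Sketch (hypothetical caller): probing a range before starting a DMA
 * transfer, so an error can be signalled instead of partially writing.
 * "addr" and "size" are placeholders.
 *
 *     if (!address_space_access_valid(&address_space_memory, addr, size,
 *                                     true)) {
 *         ... raise a bus/DMA error ...
 *     }
 */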
2745
aliguori6d16c2f2009-01-22 16:59:11 +00002746/* Map a physical memory region into a host virtual address.
2747 * May map a subset of the requested range, given by and returned in *plen.
2748 * May return NULL if resources needed to perform the mapping are exhausted.
2749 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002750 * Use cpu_register_map_client() to know when retrying the map operation is
2751 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002752 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002753void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002754 hwaddr addr,
2755 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002756 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002757{
Avi Kivitya8170e52012-10-23 12:30:10 +02002758 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002759 hwaddr done = 0;
2760 hwaddr l, xlat, base;
2761 MemoryRegion *mr, *this_mr;
2762 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002763
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002764 if (len == 0) {
2765 return NULL;
2766 }
aliguori6d16c2f2009-01-22 16:59:11 +00002767
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002768 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002769 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002770 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002771
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002772 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002773 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002774 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002775 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002776 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002777 /* Avoid unbounded allocations */
2778 l = MIN(l, TARGET_PAGE_SIZE);
2779 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002780 bounce.addr = addr;
2781 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002782
2783 memory_region_ref(mr);
2784 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002785 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002786 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2787 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002788 }
aliguori6d16c2f2009-01-22 16:59:11 +00002789
Paolo Bonzini41063e12015-03-18 14:21:43 +01002790 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002791 *plen = l;
2792 return bounce.buffer;
2793 }
2794
2795 base = xlat;
2796 raddr = memory_region_get_ram_addr(mr);
2797
2798 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002799 len -= l;
2800 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002801 done += l;
2802 if (len == 0) {
2803 break;
2804 }
2805
2806 l = len;
2807 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2808 if (this_mr != mr || xlat != base + done) {
2809 break;
2810 }
aliguori6d16c2f2009-01-22 16:59:11 +00002811 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002812
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002813 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002814 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002815 *plen = done;
2816 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002817}
2818
Avi Kivityac1970f2012-10-03 16:22:53 +02002819/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002820 * Will also mark the memory as dirty if is_write == 1. access_len gives
2821 * the amount of memory that was actually read or written by the caller.
2822 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002823void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2824 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002825{
2826 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002827 MemoryRegion *mr;
2828 ram_addr_t addr1;
2829
2830 mr = qemu_ram_addr_from_host(buffer, &addr1);
2831 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002832 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002833 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002834 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002835 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002836 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002837 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002838 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002839 return;
2840 }
2841 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002842 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2843 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002844 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002845 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002846 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002847 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002848 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002849 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002850}
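
/*
 * Sketch of the map/access/unmap pattern the two functions above are meant
 * for (hypothetical caller; "as", "addr", "size" and "data" are
 * placeholders):
 *
 *     hwaddr len = size;
 *     void *p = address_space_map(as, addr, &len, true);
 *     if (!p) {
 *         ... retry later via cpu_register_map_client() ...
 *     }
 *     memcpy(p, data, len);              (len may be less than size)
 *     address_space_unmap(as, p, len, true, len);
 */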
bellardd0ecd2a2006-04-23 17:14:48 +00002851
Avi Kivitya8170e52012-10-23 12:30:10 +02002852void *cpu_physical_memory_map(hwaddr addr,
2853 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002854 int is_write)
2855{
2856 return address_space_map(&address_space_memory, addr, plen, is_write);
2857}
2858
Avi Kivitya8170e52012-10-23 12:30:10 +02002859void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2860 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002861{
2862 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2863}
2864
bellard8df1cd02005-01-28 22:37:22 +00002865/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002866static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2867 MemTxAttrs attrs,
2868 MemTxResult *result,
2869 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002870{
bellard8df1cd02005-01-28 22:37:22 +00002871 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002872 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002873 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002874 hwaddr l = 4;
2875 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002876 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002877 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002878
Paolo Bonzini41063e12015-03-18 14:21:43 +01002879 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002880 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002881 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002882 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002883
bellard8df1cd02005-01-28 22:37:22 +00002884 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002885 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002886#if defined(TARGET_WORDS_BIGENDIAN)
2887 if (endian == DEVICE_LITTLE_ENDIAN) {
2888 val = bswap32(val);
2889 }
2890#else
2891 if (endian == DEVICE_BIG_ENDIAN) {
2892 val = bswap32(val);
2893 }
2894#endif
bellard8df1cd02005-01-28 22:37:22 +00002895 } else {
2896 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002897 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002898 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002899 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002900 switch (endian) {
2901 case DEVICE_LITTLE_ENDIAN:
2902 val = ldl_le_p(ptr);
2903 break;
2904 case DEVICE_BIG_ENDIAN:
2905 val = ldl_be_p(ptr);
2906 break;
2907 default:
2908 val = ldl_p(ptr);
2909 break;
2910 }
Peter Maydell50013112015-04-26 16:49:24 +01002911 r = MEMTX_OK;
2912 }
2913 if (result) {
2914 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002915 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002916 if (release_lock) {
2917 qemu_mutex_unlock_iothread();
2918 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002919 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002920 return val;
2921}
2922
Peter Maydell50013112015-04-26 16:49:24 +01002923uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2924 MemTxAttrs attrs, MemTxResult *result)
2925{
2926 return address_space_ldl_internal(as, addr, attrs, result,
2927 DEVICE_NATIVE_ENDIAN);
2928}
2929
2930uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2931 MemTxAttrs attrs, MemTxResult *result)
2932{
2933 return address_space_ldl_internal(as, addr, attrs, result,
2934 DEVICE_LITTLE_ENDIAN);
2935}
2936
2937uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2938 MemTxAttrs attrs, MemTxResult *result)
2939{
2940 return address_space_ldl_internal(as, addr, attrs, result,
2941 DEVICE_BIG_ENDIAN);
2942}
2943
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002944uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002945{
Peter Maydell50013112015-04-26 16:49:24 +01002946 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002947}
2948
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002949uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002950{
Peter Maydell50013112015-04-26 16:49:24 +01002951 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002952}
2953
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002954uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002955{
Peter Maydell50013112015-04-26 16:49:24 +01002956 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002957}
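
/*
 * Example (sketch): reading a 32-bit little-endian descriptor field
 * directly from guest memory with the convenience wrappers above.
 * "desc_addr" is a placeholder.
 *
 *     uint32_t flags = ldl_le_phys(&address_space_memory, desc_addr);
 *
 * Device code that needs to know whether the access faulted uses the
 * address_space_ldl_*() variants and checks the MemTxResult instead.
 */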
2958
bellard84b7b8e2005-11-28 21:19:04 +00002959/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002960static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2961 MemTxAttrs attrs,
2962 MemTxResult *result,
2963 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002964{
bellard84b7b8e2005-11-28 21:19:04 +00002965 uint8_t *ptr;
2966 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002967 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002968 hwaddr l = 8;
2969 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002970 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002971 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002972
Paolo Bonzini41063e12015-03-18 14:21:43 +01002973 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002974 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002975 false);
2976 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002977 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002978
bellard84b7b8e2005-11-28 21:19:04 +00002979 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002980 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002981#if defined(TARGET_WORDS_BIGENDIAN)
2982 if (endian == DEVICE_LITTLE_ENDIAN) {
2983 val = bswap64(val);
2984 }
2985#else
2986 if (endian == DEVICE_BIG_ENDIAN) {
2987 val = bswap64(val);
2988 }
2989#endif
bellard84b7b8e2005-11-28 21:19:04 +00002990 } else {
2991 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002992 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002993 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002994 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002995 switch (endian) {
2996 case DEVICE_LITTLE_ENDIAN:
2997 val = ldq_le_p(ptr);
2998 break;
2999 case DEVICE_BIG_ENDIAN:
3000 val = ldq_be_p(ptr);
3001 break;
3002 default:
3003 val = ldq_p(ptr);
3004 break;
3005 }
Peter Maydell50013112015-04-26 16:49:24 +01003006 r = MEMTX_OK;
3007 }
3008 if (result) {
3009 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003010 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003011 if (release_lock) {
3012 qemu_mutex_unlock_iothread();
3013 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003014 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003015 return val;
3016}
3017
Peter Maydell50013112015-04-26 16:49:24 +01003018uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3019 MemTxAttrs attrs, MemTxResult *result)
3020{
3021 return address_space_ldq_internal(as, addr, attrs, result,
3022 DEVICE_NATIVE_ENDIAN);
3023}
3024
3025uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3026 MemTxAttrs attrs, MemTxResult *result)
3027{
3028 return address_space_ldq_internal(as, addr, attrs, result,
3029 DEVICE_LITTLE_ENDIAN);
3030}
3031
3032uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3033 MemTxAttrs attrs, MemTxResult *result)
3034{
3035 return address_space_ldq_internal(as, addr, attrs, result,
3036 DEVICE_BIG_ENDIAN);
3037}
3038
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003039uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003040{
Peter Maydell50013112015-04-26 16:49:24 +01003041 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003042}
3043
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003044uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003045{
Peter Maydell50013112015-04-26 16:49:24 +01003046 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003047}
3048
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003049uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003050{
Peter Maydell50013112015-04-26 16:49:24 +01003051 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003052}
3053
bellardaab33092005-10-30 20:48:42 +00003054/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003055uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3056 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003057{
3058 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003059 MemTxResult r;
3060
3061 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3062 if (result) {
3063 *result = r;
3064 }
bellardaab33092005-10-30 20:48:42 +00003065 return val;
3066}
3067
Peter Maydell50013112015-04-26 16:49:24 +01003068uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3069{
3070 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3071}
3072
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003073/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003074static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3075 hwaddr addr,
3076 MemTxAttrs attrs,
3077 MemTxResult *result,
3078 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003079{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003080 uint8_t *ptr;
3081 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003082 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003083 hwaddr l = 2;
3084 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003085 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003086 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003087
Paolo Bonzini41063e12015-03-18 14:21:43 +01003088 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003089 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003090 false);
3091 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003092 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003093
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003094 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003095 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003096#if defined(TARGET_WORDS_BIGENDIAN)
3097 if (endian == DEVICE_LITTLE_ENDIAN) {
3098 val = bswap16(val);
3099 }
3100#else
3101 if (endian == DEVICE_BIG_ENDIAN) {
3102 val = bswap16(val);
3103 }
3104#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003105 } else {
3106 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003107 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003108 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003109 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003110 switch (endian) {
3111 case DEVICE_LITTLE_ENDIAN:
3112 val = lduw_le_p(ptr);
3113 break;
3114 case DEVICE_BIG_ENDIAN:
3115 val = lduw_be_p(ptr);
3116 break;
3117 default:
3118 val = lduw_p(ptr);
3119 break;
3120 }
Peter Maydell50013112015-04-26 16:49:24 +01003121 r = MEMTX_OK;
3122 }
3123 if (result) {
3124 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003125 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003126 if (release_lock) {
3127 qemu_mutex_unlock_iothread();
3128 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003129 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003130 return val;
bellardaab33092005-10-30 20:48:42 +00003131}
3132
Peter Maydell50013112015-04-26 16:49:24 +01003133uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3134 MemTxAttrs attrs, MemTxResult *result)
3135{
3136 return address_space_lduw_internal(as, addr, attrs, result,
3137 DEVICE_NATIVE_ENDIAN);
3138}
3139
3140uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3141 MemTxAttrs attrs, MemTxResult *result)
3142{
3143 return address_space_lduw_internal(as, addr, attrs, result,
3144 DEVICE_LITTLE_ENDIAN);
3145}
3146
3147uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3148 MemTxAttrs attrs, MemTxResult *result)
3149{
3150 return address_space_lduw_internal(as, addr, attrs, result,
3151 DEVICE_BIG_ENDIAN);
3152}
3153
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003154uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003155{
Peter Maydell50013112015-04-26 16:49:24 +01003156 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003157}
3158
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003159uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003160{
Peter Maydell50013112015-04-26 16:49:24 +01003161 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003162}
3163
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003164uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003165{
Peter Maydell50013112015-04-26 16:49:24 +01003166 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003167}
3168
bellard8df1cd02005-01-28 22:37:22 +00003169/* warning: addr must be aligned. The ram page is not marked as dirty
3170 and the code inside is not invalidated. It is useful if the dirty
3171 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003172void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3173 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003174{
bellard8df1cd02005-01-28 22:37:22 +00003175 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003176 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003177 hwaddr l = 4;
3178 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003179 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003180 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003181 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003182
Paolo Bonzini41063e12015-03-18 14:21:43 +01003183 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003184 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003185 true);
3186 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003187 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003188
Peter Maydell50013112015-04-26 16:49:24 +01003189 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003190 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003191 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003192 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003193 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003194
Paolo Bonzini845b6212015-03-23 11:45:53 +01003195 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3196 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003197 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003198 r = MEMTX_OK;
3199 }
3200 if (result) {
3201 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003202 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003203 if (release_lock) {
3204 qemu_mutex_unlock_iothread();
3205 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003206 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003207}
3208
Peter Maydell50013112015-04-26 16:49:24 +01003209void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3210{
3211 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3212}
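
/*
 * Typical use (sketch, modelled on target MMU helpers): setting the
 * accessed/dirty bits in a guest PTE during a software page-table walk
 * without marking the RAM page dirty for TCG.  "cs", "pte", "pte_addr" and
 * PG_ACCESSED_MASK stand in for target-specific values.
 *
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 */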
3213
bellard8df1cd02005-01-28 22:37:22 +00003214/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003215static inline void address_space_stl_internal(AddressSpace *as,
3216 hwaddr addr, uint32_t val,
3217 MemTxAttrs attrs,
3218 MemTxResult *result,
3219 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003220{
bellard8df1cd02005-01-28 22:37:22 +00003221 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003222 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003223 hwaddr l = 4;
3224 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003225 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003226 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003227
Paolo Bonzini41063e12015-03-18 14:21:43 +01003228 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003229 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003230 true);
3231 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003232 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003233
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003234#if defined(TARGET_WORDS_BIGENDIAN)
3235 if (endian == DEVICE_LITTLE_ENDIAN) {
3236 val = bswap32(val);
3237 }
3238#else
3239 if (endian == DEVICE_BIG_ENDIAN) {
3240 val = bswap32(val);
3241 }
3242#endif
Peter Maydell50013112015-04-26 16:49:24 +01003243 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003244 } else {
bellard8df1cd02005-01-28 22:37:22 +00003245 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003246 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003247 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003248 switch (endian) {
3249 case DEVICE_LITTLE_ENDIAN:
3250 stl_le_p(ptr, val);
3251 break;
3252 case DEVICE_BIG_ENDIAN:
3253 stl_be_p(ptr, val);
3254 break;
3255 default:
3256 stl_p(ptr, val);
3257 break;
3258 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003259 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003260 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003261 }
Peter Maydell50013112015-04-26 16:49:24 +01003262 if (result) {
3263 *result = r;
3264 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003265 if (release_lock) {
3266 qemu_mutex_unlock_iothread();
3267 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003268 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003269}
3270
3271void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3272 MemTxAttrs attrs, MemTxResult *result)
3273{
3274 address_space_stl_internal(as, addr, val, attrs, result,
3275 DEVICE_NATIVE_ENDIAN);
3276}
3277
3278void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3279 MemTxAttrs attrs, MemTxResult *result)
3280{
3281 address_space_stl_internal(as, addr, val, attrs, result,
3282 DEVICE_LITTLE_ENDIAN);
3283}
3284
3285void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3286 MemTxAttrs attrs, MemTxResult *result)
3287{
3288 address_space_stl_internal(as, addr, val, attrs, result,
3289 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003290}
3291
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003292void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003293{
Peter Maydell50013112015-04-26 16:49:24 +01003294 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003295}
3296
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003297void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003298{
Peter Maydell50013112015-04-26 16:49:24 +01003299 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003300}
3301
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003302void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003303{
Peter Maydell50013112015-04-26 16:49:24 +01003304 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003305}
3306
bellardaab33092005-10-30 20:48:42 +00003307/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003308void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3309 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003310{
3311 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003312 MemTxResult r;
3313
3314 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3315 if (result) {
3316 *result = r;
3317 }
3318}
3319
3320void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3321{
3322 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003323}
3324
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003325/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003326static inline void address_space_stw_internal(AddressSpace *as,
3327 hwaddr addr, uint32_t val,
3328 MemTxAttrs attrs,
3329 MemTxResult *result,
3330 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003331{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003332 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003333 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003334 hwaddr l = 2;
3335 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003336 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003337 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003338
Paolo Bonzini41063e12015-03-18 14:21:43 +01003339 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003340 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003341 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003342 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003343
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003344#if defined(TARGET_WORDS_BIGENDIAN)
3345 if (endian == DEVICE_LITTLE_ENDIAN) {
3346 val = bswap16(val);
3347 }
3348#else
3349 if (endian == DEVICE_BIG_ENDIAN) {
3350 val = bswap16(val);
3351 }
3352#endif
Peter Maydell50013112015-04-26 16:49:24 +01003353 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003354 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003355 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003356 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003357 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003358 switch (endian) {
3359 case DEVICE_LITTLE_ENDIAN:
3360 stw_le_p(ptr, val);
3361 break;
3362 case DEVICE_BIG_ENDIAN:
3363 stw_be_p(ptr, val);
3364 break;
3365 default:
3366 stw_p(ptr, val);
3367 break;
3368 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003369 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003370 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003371 }
Peter Maydell50013112015-04-26 16:49:24 +01003372 if (result) {
3373 *result = r;
3374 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003375 if (release_lock) {
3376 qemu_mutex_unlock_iothread();
3377 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003378 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003379}
3380
3381void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3382 MemTxAttrs attrs, MemTxResult *result)
3383{
3384 address_space_stw_internal(as, addr, val, attrs, result,
3385 DEVICE_NATIVE_ENDIAN);
3386}
3387
3388void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3389 MemTxAttrs attrs, MemTxResult *result)
3390{
3391 address_space_stw_internal(as, addr, val, attrs, result,
3392 DEVICE_LITTLE_ENDIAN);
3393}
3394
3395void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3396 MemTxAttrs attrs, MemTxResult *result)
3397{
3398 address_space_stw_internal(as, addr, val, attrs, result,
3399 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003400}
3401
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003402void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003403{
Peter Maydell50013112015-04-26 16:49:24 +01003404 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003405}
3406
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003407void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003408{
Peter Maydell50013112015-04-26 16:49:24 +01003409 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003410}
3411
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003412void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003413{
Peter Maydell50013112015-04-26 16:49:24 +01003414 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003415}
3416
bellardaab33092005-10-30 20:48:42 +00003417/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003418void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3419 MemTxAttrs attrs, MemTxResult *result)
3420{
3421 MemTxResult r;
3422 val = tswap64(val);
3423 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3424 if (result) {
3425 *result = r;
3426 }
3427}
3428
3429void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3430 MemTxAttrs attrs, MemTxResult *result)
3431{
3432 MemTxResult r;
3433 val = cpu_to_le64(val);
3434 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3435 if (result) {
3436 *result = r;
3437 }
3438}

3439void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3440 MemTxAttrs attrs, MemTxResult *result)
3441{
3442 MemTxResult r;
3443 val = cpu_to_be64(val);
3444 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3445 if (result) {
3446 *result = r;
3447 }
3448}
3449
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003450void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003451{
Peter Maydell50013112015-04-26 16:49:24 +01003452 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003453}
3454
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003455void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003456{
Peter Maydell50013112015-04-26 16:49:24 +01003457 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003458}
3459
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003460void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003461{
Peter Maydell50013112015-04-26 16:49:24 +01003462 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003463}
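/*
 * Illustrative sketch (not part of the original file): a typical use of
 * the 64-bit convenience wrapper is publishing a guest-physical pointer,
 * here a hypothetical descriptor address DESC_GPA written to a table slot
 * at TABLE_GPA; as with the 16-bit wrappers, any bus error is discarded.
 *
 *     stq_le_phys(&address_space_memory, TABLE_GPA, DESC_GPA);
 */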
3464
aliguori5e2972f2009-03-28 17:51:36 +00003465/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003466int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003467 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003468{
3469 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003470 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003471 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003472
3473 while (len > 0) {
3474 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003475 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003476 /* if no physical page mapped, return an error */
3477 if (phys_addr == -1)
3478 return -1;
3479 l = (page + TARGET_PAGE_SIZE) - addr;
3480 if (l > len)
3481 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003482 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003483 if (is_write) {
3484 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3485 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003486 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3487 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003488 }
bellard13eb76e2004-01-24 15:23:36 +00003489 len -= l;
3490 buf += l;
3491 addr += l;
3492 }
3493 return 0;
3494}
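/*
 * Illustrative sketch (not part of the original file): a debugger-style
 * read of four bytes at a hypothetical guest virtual address VADDR on
 * behalf of "cpu"; cpu_memory_rw_debug() does the per-page translation
 * through cpu_get_phys_page_debug() as shown above.
 *
 *     uint8_t buf[4];
 *     if (cpu_memory_rw_debug(cpu, VADDR, buf, sizeof(buf), 0) < 0) {
 *         ...the address is not mapped in the guest...
 *     }
 */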
Paul Brooka68fe892010-03-01 00:08:59 +00003495#endif
bellard13eb76e2004-01-24 15:23:36 +00003496
Blue Swirl8e4a4242013-01-06 18:30:17 +00003497/*
3498 * A helper function for the _utterly broken_ virtio device model to find out if
3499 * it's running on a big endian machine. Don't do this at home kids!
3500 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003501bool target_words_bigendian(void);
3502bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003503{
3504#if defined(TARGET_WORDS_BIGENDIAN)
3505 return true;
3506#else
3507 return false;
3508#endif
3509}
3510
Wen Congyang76f35532012-05-07 12:04:18 +08003511#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003512bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003513{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003514 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003515 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003516 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003517
Paolo Bonzini41063e12015-03-18 14:21:43 +01003518 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003519 mr = address_space_translate(&address_space_memory,
3520 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003521
Paolo Bonzini41063e12015-03-18 14:21:43 +01003522 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3523 rcu_read_unlock();
3524 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003525}
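/*
 * Illustrative sketch (not part of the original file): a caller that must
 * not touch device (MMIO) regions, for instance when dumping guest memory,
 * can filter guest-physical pages with this predicate.  GPA below is a
 * hypothetical guest-physical address.
 *
 *     if (!cpu_physical_memory_is_io(GPA)) {
 *         ...safe to treat the page at GPA as RAM/ROM contents...
 *     }
 */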
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003526
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003527int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003528{
3529 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003530 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003531
Mike Day0dc3f442013-09-05 14:41:35 -04003532 rcu_read_lock();
3533 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003534 ret = func(block->idstr, block->host, block->offset,
3535 block->used_length, opaque);
3536 if (ret) {
3537 break;
3538 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003539 }
Mike Day0dc3f442013-09-05 14:41:35 -04003540 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003541 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003542}
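/*
 * Illustrative sketch (not part of the original file): a RAMBlockIterFunc
 * callback, assuming the parameter order used in the call above (idstr,
 * host pointer, offset, used length, opaque).  Returning non-zero stops
 * the walk early.
 *
 *     static int count_block(const char *name, void *host,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         unsigned *n = opaque;
 *         (*n)++;
 *         return 0;
 *     }
 *
 *     unsigned n = 0;
 *     qemu_ram_foreach_block(count_block, &n);
 */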
Peter Maydellec3f8c92013-06-27 20:53:38 +01003543#endif