/*
 * Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
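
/* Sizing example (assuming TARGET_PAGE_BITS == 12, i.e. 4 KiB target pages,
 * which is not fixed by this file): P_L2_LEVELS is ((64 - 12 - 1) / 9) + 1 = 6,
 * so a physical page lookup walks at most six 512-entry Nodes, each level
 * consuming a 9-bit slice of the page frame number.
 */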

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
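
/* Illustrative case (hypothetical values, not a specific call site): setting
 * nb == P_L2_SIZE pages at a P_L2_SIZE-aligned index stores the leaf once in
 * a single level-1 entry with skip == 0, whereas an unaligned or smaller
 * range recurses down to level 0 and fills the individual leaf slots.
 */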

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
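
/* Net effect: after compaction an entry whose node held a single valid child
 * points straight at that child's target with the skip counts accumulated,
 * so phys_page_find() can cross several sparse levels in one iteration.
 */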

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
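
/* Lookup sketch (assuming 4 KiB target pages): for addr == 0x12345000 the
 * index is 0x12345; each loop iteration consumes lp.skip levels and selects
 * the next entry by a 9-bit slice of the index, until a leaf (skip == 0)
 * names a sections[] entry, or a NIL pointer falls back to
 * PHYS_SECTION_UNASSIGNED.
 */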
334
Blue Swirle5548612012-04-21 13:08:33 +0000335bool memory_region_is_unassigned(MemoryRegion *mr)
336{
Paolo Bonzini2a8e7492013-05-24 14:34:08 +0200337 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
Blue Swirle5548612012-04-21 13:08:33 +0000338 && mr != &io_mem_watch;
339}
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200340
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100341/* Called from RCU critical section */
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200342static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
Jan Kiszka90260c62013-05-26 21:46:51 +0200343 hwaddr addr,
344 bool resolve_subpage)
Jan Kiszka9f029602013-05-06 16:48:02 +0200345{
Jan Kiszka90260c62013-05-26 21:46:51 +0200346 MemoryRegionSection *section;
347 subpage_t *subpage;
348
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200349 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
Jan Kiszka90260c62013-05-26 21:46:51 +0200350 if (resolve_subpage && section->mr->subpage) {
351 subpage = container_of(section->mr, subpage_t, iomem);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200352 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
Jan Kiszka90260c62013-05-26 21:46:51 +0200353 }
354 return section;
Jan Kiszka9f029602013-05-06 16:48:02 +0200355}
356
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100357/* Called from RCU critical section */
Jan Kiszka90260c62013-05-26 21:46:51 +0200358static MemoryRegionSection *
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200359address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
Jan Kiszka90260c62013-05-26 21:46:51 +0200360 hwaddr *plen, bool resolve_subpage)
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200361{
362 MemoryRegionSection *section;
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200363 MemoryRegion *mr;
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100364 Int128 diff;
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200365
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200366 section = address_space_lookup_region(d, addr, resolve_subpage);
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200367 /* Compute offset within MemoryRegionSection */
368 addr -= section->offset_within_address_space;
369
370 /* Compute offset within MemoryRegion */
371 *xlat = addr + section->offset_within_region;
372
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200373 mr = section->mr;
Paolo Bonzinib242e0e2015-07-04 00:24:51 +0200374
375 /* MMIO registers can be expected to perform full-width accesses based only
376 * on their address, without considering adjacent registers that could
377 * decode to completely different MemoryRegions. When such registers
378 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
379 * regions overlap wildly. For this reason we cannot clamp the accesses
380 * here.
381 *
382 * If the length is small (as is the case for address_space_ldl/stl),
383 * everything works fine. If the incoming length is large, however,
384 * the caller really has to do the clamping through memory_access_size.
385 */
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200386 if (memory_region_is_ram(mr)) {
Paolo Bonzinie4a511f2015-06-17 10:36:54 +0200387 diff = int128_sub(section->size, int128_make64(addr));
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200388 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
389 }
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200390 return section;
391}
Jan Kiszka90260c62013-05-26 21:46:51 +0200392
Paolo Bonzini41063e12015-03-18 14:21:43 +0100393/* Called from RCU critical section */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +0200394MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
395 hwaddr *xlat, hwaddr *plen,
396 bool is_write)
Jan Kiszka90260c62013-05-26 21:46:51 +0200397{
Avi Kivity30951152012-10-30 13:47:46 +0200398 IOMMUTLBEntry iotlb;
399 MemoryRegionSection *section;
400 MemoryRegion *mr;
Avi Kivity30951152012-10-30 13:47:46 +0200401
402 for (;;) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100403 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
404 section = address_space_translate_internal(d, addr, &addr, plen, true);
Avi Kivity30951152012-10-30 13:47:46 +0200405 mr = section->mr;
406
407 if (!mr->iommu_ops) {
408 break;
409 }
410
Le Tan8d7b8cb2014-08-16 13:55:37 +0800411 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
Avi Kivity30951152012-10-30 13:47:46 +0200412 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
413 | (addr & iotlb.addr_mask));
Peter Crosthwaite23820db2015-03-16 22:35:54 -0700414 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
Avi Kivity30951152012-10-30 13:47:46 +0200415 if (!(iotlb.perm & (1 << is_write))) {
416 mr = &io_mem_unassigned;
417 break;
418 }
419
420 as = iotlb.target_as;
421 }
422
Alexey Kardashevskiyfe680d02014-05-07 13:40:39 +0000423 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100424 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
Peter Crosthwaite23820db2015-03-16 22:35:54 -0700425 *plen = MIN(page, *plen);
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100426 }
427
Avi Kivity30951152012-10-30 13:47:46 +0200428 *xlat = addr;
429 return mr;
Jan Kiszka90260c62013-05-26 21:46:51 +0200430}
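
/* Each loop iteration above crosses one IOMMU: the access is re-based into
 * iotlb.target_as and *plen is clamped to what the IOMMU mapping can
 * translate contiguously, e.g. with addr_mask == 0xfff an 8 KiB request is
 * cut down to the part of the 4 KiB page starting at addr; a permission
 * mismatch makes the access land in io_mem_unassigned instead.
 */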

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
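
/* Worked example (64-bit vaddr assumed): a watchpoint at vaddr
 * 0xfffffffffffffff0 with len 0x10 ends exactly at the top of the address
 * space, so wpend is ~0 rather than the wrapped-around 0 that vaddr + len
 * would give, and an 8-byte access at 0xfffffffffffffff8 still matches.
 */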

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
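
/* Example (4 KiB target pages assumed): start == 0x3000 with length == 0x2001
 * spans guest pages 3..5, so bits 3..5 of the per-client dirty bitmap are
 * tested and cleared atomically; the TCG TLBs are only reset when at least
 * one of those pages was in fact dirty.
 */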

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
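
/* The returned value packs two encodings: for RAM it is the page-aligned ram
 * address of the target page with a small section constant
 * (PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM) ORed into the low bits; for
 * MMIO it is the section's index in the dispatch map plus the offset within
 * the page; a matching watchpoint overrides both with PHYS_SECTION_WATCH + paddr.
 */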
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
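
/* Consequence of the assert above (assuming 4 KiB target pages): a dispatch
 * map can hold at most TARGET_PAGE_SIZE == 4096 sections, since the section
 * number must fit in the low TARGET_PAGE_BITS bits of a page-aligned iotlb
 * value without disturbing its address part.
 */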
1049
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001050static void phys_section_destroy(MemoryRegion *mr)
1051{
Don Slutz55b4e802015-11-30 17:11:04 -05001052 bool have_sub_page = mr->subpage;
1053
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001054 memory_region_unref(mr);
1055
Don Slutz55b4e802015-11-30 17:11:04 -05001056 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001057 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001058 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001059 g_free(subpage);
1060 }
1061}
1062
Paolo Bonzini60926662013-05-29 12:30:26 +02001063static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001064{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001065 while (map->sections_nb > 0) {
1066 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001067 phys_section_destroy(section->mr);
1068 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001069 g_free(map->sections);
1070 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001071}
1072
Avi Kivityac1970f2012-10-03 16:22:53 +02001073static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001074{
1075 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001076 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001077 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001078 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001079 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001080 MemoryRegionSection subsection = {
1081 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001082 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001083 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001084 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001085
Avi Kivityf3705d52012-03-08 16:16:34 +02001086 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001087
Avi Kivityf3705d52012-03-08 16:16:34 +02001088 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001089 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001090 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001091 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001092 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001093 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001094 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001095 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001096 }
1097 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001098 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001099 subpage_register(subpage, start, end,
1100 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001101}
1102
1103
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001104static void register_multipage(AddressSpaceDispatch *d,
1105 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001106{
Avi Kivitya8170e52012-10-23 12:30:10 +02001107 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001108 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001109 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1110 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001111
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001112 assert(num_pages);
1113 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001114}
1115
Avi Kivityac1970f2012-10-03 16:22:53 +02001116static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001117{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001118 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001119 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001120 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001121 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001122
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001123 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1124 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1125 - now.offset_within_address_space;
1126
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001127 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001128 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001129 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001130 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001131 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001132 while (int128_ne(remain.size, now.size)) {
1133 remain.size = int128_sub(remain.size, now.size);
1134 remain.offset_within_address_space += int128_get64(now.size);
1135 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001136 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001137 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001138 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001139 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001140 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001141 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001142 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001144 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001145 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001146 }
1147}
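/*
 * Illustrative walk-through (not part of the original file): assuming a
 * 4 KiB TARGET_PAGE_SIZE, a section covering [0x1800, 0x4800) is split by
 * the loop above into three registrations:
 *
 *   1. head  [0x1800, 0x2000)  -> register_subpage()    (unaligned start)
 *   2. body  [0x2000, 0x4000)  -> register_multipage()  (whole pages)
 *   3. tail  [0x4000, 0x4800)  -> register_subpage()    (partial last page)
 *
 * so only the partially covered pages pay the subpage indirection cost.
 */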
1148
Sheng Yang62a27442010-01-26 19:21:16 +08001149void qemu_flush_coalesced_mmio_buffer(void)
1150{
1151 if (kvm_enabled())
1152 kvm_flush_coalesced_mmio_buffer();
1153}
1154
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001155void qemu_mutex_lock_ramlist(void)
1156{
1157 qemu_mutex_lock(&ram_list.mutex);
1158}
1159
1160void qemu_mutex_unlock_ramlist(void)
1161{
1162 qemu_mutex_unlock(&ram_list.mutex);
1163}
1164
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001165#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001166
1167#include <sys/vfs.h>
1168
1169#define HUGETLBFS_MAGIC 0x958458f6
1170
Hu Taofc7a5802014-09-09 13:28:01 +08001171static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001172{
1173 struct statfs fs;
1174 int ret;
1175
1176 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001177 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001178 } while (ret != 0 && errno == EINTR);
1179
1180 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001181 error_setg_errno(errp, errno, "failed to get page size of file %s",
1182 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001183 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001184 }
1185
Marcelo Tosattic9027602010-03-01 20:25:08 -03001186 return fs.f_bsize;
1187}
1188
Alex Williamson04b16652010-07-02 11:13:17 -06001189static void *file_ram_alloc(RAMBlock *block,
1190 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001191 const char *path,
1192 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001193{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001194 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001195 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001196 char *sanitized_name;
1197 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001198 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001200 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001201 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001202
Hu Taofc7a5802014-09-09 13:28:01 +08001203 hpagesize = gethugepagesize(path, &local_err);
1204 if (local_err) {
1205 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001206 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001207 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001208 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001209
1210 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001211 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1212 "or larger than huge page size 0x%" PRIx64,
1213 memory, hpagesize);
1214 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001215 }
1216
1217 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001218 error_setg(errp,
1219 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001220 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001221 }
1222
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001223 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1224 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1225 sanitized_name = g_strdup(memory_region_name(block->mr));
1226 for (c = sanitized_name; *c != '\0'; c++) {
1227 if (*c == '/') {
1228 *c = '_';
1229 }
1230 }
1231
1232 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1233 sanitized_name);
1234 g_free(sanitized_name);
1235
1236 fd = mkstemp(filename);
1237 if (fd >= 0) {
1238 unlink(filename);
1239 }
1240 g_free(filename);
1241 } else {
1242 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001243 }
1244
Marcelo Tosattic9027602010-03-01 20:25:08 -03001245 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001246 error_setg_errno(errp, errno,
1247 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001248 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001249 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001250
Chen Hanxiao9284f312015-07-24 11:12:03 +08001251 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001252
1253 /*
1254 * ftruncate is not supported by hugetlbfs in older
1255 * hosts, so don't bother bailing out on errors.
1256 * If anything goes wrong with it under other filesystems,
1257 * mmap will fail.
1258 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001259 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001260 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001261 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001262
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001263 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001264 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001265 error_setg_errno(errp, errno,
1266 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001267 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001268 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001269 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001270
1271 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001272 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001273 }
1274
Alex Williamson04b16652010-07-02 11:13:17 -06001275 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001276 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001277
1278error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001279 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001280}
1281#endif
1282
Mike Day0dc3f442013-09-05 14:41:35 -04001283/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001284static ram_addr_t find_ram_offset(ram_addr_t size)
1285{
Alex Williamson04b16652010-07-02 11:13:17 -06001286 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001287 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001288
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001289 assert(size != 0); /* it would hand out the same offset multiple times */
1290
Mike Day0dc3f442013-09-05 14:41:35 -04001291 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001292 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001293 }
Alex Williamson04b16652010-07-02 11:13:17 -06001294
Mike Day0dc3f442013-09-05 14:41:35 -04001295 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001296 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001297
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001298 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001299
Mike Day0dc3f442013-09-05 14:41:35 -04001300 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001301 if (next_block->offset >= end) {
1302 next = MIN(next, next_block->offset);
1303 }
1304 }
1305 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001306 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001307 mingap = next - end;
1308 }
1309 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001310
1311 if (offset == RAM_ADDR_MAX) {
1312 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1313 (uint64_t)size);
1314 abort();
1315 }
1316
Alex Williamson04b16652010-07-02 11:13:17 -06001317 return offset;
1318}
1319
Juan Quintela652d7ec2012-07-20 10:37:54 +02001320ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001321{
Alex Williamsond17b5282010-06-25 11:08:38 -06001322 RAMBlock *block;
1323 ram_addr_t last = 0;
1324
Mike Day0dc3f442013-09-05 14:41:35 -04001325 rcu_read_lock();
1326 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001327 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001328 }
Mike Day0dc3f442013-09-05 14:41:35 -04001329 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001330 return last;
1331}
1332
Jason Baronddb97f12012-08-02 15:44:16 -04001333static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1334{
1335 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001336
1337 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001338 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001339 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1340 if (ret) {
1341 perror("qemu_madvise");
1342 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1343 "but dump_guest_core=off specified\n");
1344 }
1345 }
1346}
1347
Mike Day0dc3f442013-09-05 14:41:35 -04001348/* Called within an RCU critical section, or while the ramlist lock
1349 * is held.
1350 */
Hu Tao20cfe882014-04-02 15:13:26 +08001351static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001352{
Hu Tao20cfe882014-04-02 15:13:26 +08001353 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001354
Mike Day0dc3f442013-09-05 14:41:35 -04001355 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001356 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001357 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001358 }
1359 }
Hu Tao20cfe882014-04-02 15:13:26 +08001360
1361 return NULL;
1362}
1363
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001364const char *qemu_ram_get_idstr(RAMBlock *rb)
1365{
1366 return rb->idstr;
1367}
1368
Mike Dayae3a7042013-09-05 14:41:35 -04001369/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001370void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1371{
Mike Dayae3a7042013-09-05 14:41:35 -04001372 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001373
Mike Day0dc3f442013-09-05 14:41:35 -04001374 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001375 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001376 assert(new_block);
1377 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001378
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001379 if (dev) {
1380 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001381 if (id) {
1382 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001383 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001384 }
1385 }
1386 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1387
Mike Day0dc3f442013-09-05 14:41:35 -04001388 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001389 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001390 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1391 new_block->idstr);
1392 abort();
1393 }
1394 }
Mike Day0dc3f442013-09-05 14:41:35 -04001395 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001396}
1397
Mike Dayae3a7042013-09-05 14:41:35 -04001398/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001399void qemu_ram_unset_idstr(ram_addr_t addr)
1400{
Mike Dayae3a7042013-09-05 14:41:35 -04001401 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001402
Mike Dayae3a7042013-09-05 14:41:35 -04001403 /* FIXME: arch_init.c assumes that this is not called throughout
1404 * migration. Ignore the problem since hot-unplug during migration
1405 * does not work anyway.
1406 */
1407
Mike Day0dc3f442013-09-05 14:41:35 -04001408 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001409 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001410 if (block) {
1411 memset(block->idstr, 0, sizeof(block->idstr));
1412 }
Mike Day0dc3f442013-09-05 14:41:35 -04001413 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001414}
1415
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001416static int memory_try_enable_merging(void *addr, size_t len)
1417{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001418 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001419 /* disabled by the user */
1420 return 0;
1421 }
1422
1423 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1424}
1425
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001426/* Only legal before the guest might have detected the memory size: e.g. on
1427 * incoming migration, or right after reset.
1428 *
1429 * As the memory core doesn't know how memory is accessed, it is up to the
1430 * resize callback to update device state and/or add assertions to detect
1431 * misuse, if necessary.
1432 */
1433int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1434{
1435 RAMBlock *block = find_ram_block(base);
1436
1437 assert(block);
1438
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001439 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001440
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001441 if (block->used_length == newsize) {
1442 return 0;
1443 }
1444
1445 if (!(block->flags & RAM_RESIZEABLE)) {
1446 error_setg_errno(errp, EINVAL,
1447 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1448 " in != 0x" RAM_ADDR_FMT, block->idstr,
1449 newsize, block->used_length);
1450 return -EINVAL;
1451 }
1452
1453 if (block->max_length < newsize) {
1454 error_setg_errno(errp, EINVAL,
1455 "Length too large: %s: 0x" RAM_ADDR_FMT
1456 " > 0x" RAM_ADDR_FMT, block->idstr,
1457 newsize, block->max_length);
1458 return -EINVAL;
1459 }
1460
1461 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1462 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001463 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1464 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001465 memory_region_set_size(block->mr, newsize);
1466 if (block->resized) {
1467 block->resized(block->idstr, newsize, block->host);
1468 }
1469 return 0;
1470}
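/*
 * Minimal usage sketch (illustrative, not part of the original file): a
 * device whose RAM block was allocated as resizeable can grow it when an
 * incoming migration stream announces a larger used length.  The helper
 * name and "blob_base" are hypothetical; qemu_ram_resize() is defined above.
 */
static inline void example_resize_on_incoming(ram_addr_t blob_base,
                                              ram_addr_t announced_size)
{
    Error *err = NULL;

    /* Only legal before the guest may have observed the old size. */
    if (qemu_ram_resize(blob_base, announced_size, &err) < 0) {
        error_report("resize failed: %s", error_get_pretty(err));
        error_free(err);
    }
}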
1471
Hu Taoef701d72014-09-09 13:27:54 +08001472static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001473{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001474 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001475 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001476 ram_addr_t old_ram_size, new_ram_size;
1477
1478 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001479
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001480 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001481 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001482
1483 if (!new_block->host) {
1484 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001485 xen_ram_alloc(new_block->offset, new_block->max_length,
1486 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001487 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001488 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001489 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001490 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001491 error_setg_errno(errp, errno,
1492 "cannot set up guest memory '%s'",
1493 memory_region_name(new_block->mr));
1494 qemu_mutex_unlock_ramlist();
1495 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001496 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001497 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001498 }
1499 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001500
Li Zhijiandd631692015-07-02 20:18:06 +08001501 new_ram_size = MAX(old_ram_size,
1502 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1503 if (new_ram_size > old_ram_size) {
1504 migration_bitmap_extend(old_ram_size, new_ram_size);
1505 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001506 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1507 * QLIST (which has an RCU-friendly variant) does not have insertion at
1508 * tail, so save the last element in last_block.
1509 */
Mike Day0dc3f442013-09-05 14:41:35 -04001510 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001511 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001512 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001513 break;
1514 }
1515 }
1516 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001517 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001518 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001519 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001520 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001521 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001522 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001523 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001524
Mike Day0dc3f442013-09-05 14:41:35 -04001525 /* Write list before version */
1526 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001527 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001528 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001529
Juan Quintela2152f5c2013-10-08 13:52:02 +02001530 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1531
1532 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001533 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001534
1535 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001536 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1537 ram_list.dirty_memory[i] =
1538 bitmap_zero_extend(ram_list.dirty_memory[i],
1539 old_ram_size, new_ram_size);
1540 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001541 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001542 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001543 new_block->used_length,
1544 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001545
Paolo Bonzinia904c912015-01-21 16:18:35 +01001546 if (new_block->host) {
1547 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1548 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1549 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1550 if (kvm_enabled()) {
1551 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1552 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001553 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001554
1555 return new_block->offset;
1556}
1557
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001558#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001559ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001560 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001561 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001562{
1563 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001564 ram_addr_t addr;
1565 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001566
1567 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001568 error_setg(errp, "-mem-path not supported with Xen");
1569 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001570 }
1571
1572 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1573 /*
1574 * file_ram_alloc() needs to allocate just like
1575 * phys_mem_alloc, but we haven't bothered to provide
1576 * a hook there.
1577 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001578 error_setg(errp,
1579 "-mem-path not supported with this accelerator");
1580 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581 }
1582
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001583 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001584 new_block = g_malloc0(sizeof(*new_block));
1585 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001586 new_block->used_length = size;
1587 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001588 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001589 new_block->host = file_ram_alloc(new_block, size,
1590 mem_path, errp);
1591 if (!new_block->host) {
1592 g_free(new_block);
1593 return -1;
1594 }
1595
Hu Taoef701d72014-09-09 13:27:54 +08001596 addr = ram_block_add(new_block, &local_err);
1597 if (local_err) {
1598 g_free(new_block);
1599 error_propagate(errp, local_err);
1600 return -1;
1601 }
1602 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001603}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001604#endif
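/*
 * Illustrative sketch (not part of the original file): roughly how a memory
 * backend could place guest RAM on hugetlbfs via the entry point above.  The
 * helper name is hypothetical; size, path and mr come from the caller.
 */
#ifdef __linux__
static inline ram_addr_t example_alloc_hugepage_ram(MemoryRegion *mr,
                                                    ram_addr_t size,
                                                    const char *path,
                                                    Error **errp)
{
    /* share=true maps the backing file MAP_SHARED (see RAM_SHARED above). */
    return qemu_ram_alloc_from_file(size, mr, true, path, errp);
}
#endif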
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001605
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001606static
1607ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1608 void (*resized)(const char*,
1609 uint64_t length,
1610 void *host),
1611 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001612 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001613{
1614 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001615 ram_addr_t addr;
1616 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001617
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001618 size = HOST_PAGE_ALIGN(size);
1619 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001620 new_block = g_malloc0(sizeof(*new_block));
1621 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001622 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001623 new_block->used_length = size;
1624 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001625 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001626 new_block->fd = -1;
1627 new_block->host = host;
1628 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001629 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001630 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001631 if (resizeable) {
1632 new_block->flags |= RAM_RESIZEABLE;
1633 }
Hu Taoef701d72014-09-09 13:27:54 +08001634 addr = ram_block_add(new_block, &local_err);
1635 if (local_err) {
1636 g_free(new_block);
1637 error_propagate(errp, local_err);
1638 return -1;
1639 }
1640 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001641}
1642
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001643ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1644 MemoryRegion *mr, Error **errp)
1645{
1646 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1647}
1648
Hu Taoef701d72014-09-09 13:27:54 +08001649ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001650{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001651 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1652}
1653
1654ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1655 void (*resized)(const char*,
1656 uint64_t length,
1657 void *host),
1658 MemoryRegion *mr, Error **errp)
1659{
1660 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001661}
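/*
 * Illustrative sketch (not part of the original file): allocating a
 * resizeable block with a resize notifier.  The callback body and the sizes
 * are hypothetical; the entry point and its signature are the ones defined
 * above.
 */
static void example_blob_resized(const char *idstr, uint64_t new_len,
                                 void *host)
{
    /* A real device would refresh any cached lengths or pointers here. */
}

static inline ram_addr_t example_alloc_resizeable(MemoryRegion *mr,
                                                  Error **errp)
{
    /* Start with 1 MiB used, allow growth up to 16 MiB without remapping. */
    return qemu_ram_alloc_resizeable(1 * 1024 * 1024, 16 * 1024 * 1024,
                                     example_blob_resized, mr, errp);
}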
bellarde9a1ab12007-02-08 23:08:38 +00001662
Paolo Bonzini43771532013-09-09 17:58:40 +02001663static void reclaim_ramblock(RAMBlock *block)
1664{
1665 if (block->flags & RAM_PREALLOC) {
1666 ;
1667 } else if (xen_enabled()) {
1668 xen_invalidate_map_cache_entry(block->host);
1669#ifndef _WIN32
1670 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001671 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001672 close(block->fd);
1673#endif
1674 } else {
1675 qemu_anon_ram_free(block->host, block->max_length);
1676 }
1677 g_free(block);
1678}
1679
Anthony Liguoric227f092009-10-01 16:12:16 -05001680void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001681{
Alex Williamson04b16652010-07-02 11:13:17 -06001682 RAMBlock *block;
1683
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001684 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001685 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001686 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001687 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001688 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001689 /* Write list before version */
1690 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001691 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001692 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001693 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001694 }
1695 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001696 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001697}
1698
Huang Yingcd19cfa2011-03-02 08:56:19 +01001699#ifndef _WIN32
1700void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1701{
1702 RAMBlock *block;
1703 ram_addr_t offset;
1704 int flags;
1705 void *area, *vaddr;
1706
Mike Day0dc3f442013-09-05 14:41:35 -04001707 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001708 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001709 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001710 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001711 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001712 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001713 } else if (xen_enabled()) {
1714 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001715 } else {
1716 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001717 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001718 flags |= (block->flags & RAM_SHARED ?
1719 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001720 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1721 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001722 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001723 /*
1724 * Remap needs to match alloc. Accelerators that
1725 * set phys_mem_alloc never remap. If they did,
1726 * we'd need a remap hook here.
1727 */
1728 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1729
Huang Yingcd19cfa2011-03-02 08:56:19 +01001730 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1731 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1732 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001733 }
1734 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001735 fprintf(stderr, "Could not remap addr: "
1736 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001737 length, addr);
1738 exit(1);
1739 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001740 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001741 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001742 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001743 }
1744 }
1745}
1746#endif /* !_WIN32 */
1747
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001748int qemu_get_ram_fd(ram_addr_t addr)
1749{
Mike Dayae3a7042013-09-05 14:41:35 -04001750 RAMBlock *block;
1751 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001752
Mike Day0dc3f442013-09-05 14:41:35 -04001753 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001754 block = qemu_get_ram_block(addr);
1755 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001756 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001757 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001758}
1759
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001760void qemu_set_ram_fd(ram_addr_t addr, int fd)
1761{
1762 RAMBlock *block;
1763
1764 rcu_read_lock();
1765 block = qemu_get_ram_block(addr);
1766 block->fd = fd;
1767 rcu_read_unlock();
1768}
1769
Damjan Marion3fd74b82014-06-26 23:01:32 +02001770void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1771{
Mike Dayae3a7042013-09-05 14:41:35 -04001772 RAMBlock *block;
1773 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001774
Mike Day0dc3f442013-09-05 14:41:35 -04001775 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001776 block = qemu_get_ram_block(addr);
1777 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001778 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001779 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001780}
1781
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001782/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001783 * This should not be used for general purpose DMA. Use address_space_map
1784 * or address_space_rw instead. For local memory (e.g. video ram) that the
1785 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001786 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001787 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001788 */
1789void *qemu_get_ram_ptr(ram_addr_t addr)
1790{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001791 RAMBlock *block = qemu_get_ram_block(addr);
Mike Dayae3a7042013-09-05 14:41:35 -04001792
1793 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001794 /* We need to check if the requested address is in the RAM
1795 * because we don't want to map the entire memory in QEMU.
1796 * In that case just map until the end of the page.
1797 */
1798 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001799 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001800 }
Mike Dayae3a7042013-09-05 14:41:35 -04001801
1802 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001803 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001804 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001805}
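/*
 * Illustrative sketch (not part of the original file): the pointer returned
 * by qemu_get_ram_ptr() is only guaranteed to stay valid inside an RCU
 * critical section, so a short-lived access looks like this.  The helper
 * name is hypothetical.
 */
static inline uint32_t example_peek_ram_word(ram_addr_t addr)
{
    uint32_t val;

    rcu_read_lock();
    val = ldl_p(qemu_get_ram_ptr(addr));
    rcu_read_unlock();
    return val;
}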
1806
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001807/* Return a host pointer to the guest's RAM. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001808 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001809 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001810 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001811 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001812static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001813{
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001814 RAMBlock *block;
1815 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001816 if (*size == 0) {
1817 return NULL;
1818 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001819
1820 block = qemu_get_ram_block(addr);
1821 offset_inside_block = addr - block->offset;
1822 *size = MIN(*size, block->max_length - offset_inside_block);
1823
1824 if (xen_enabled() && block->host == NULL) {
1825 /* We need to check if the requested address is in the RAM
1826 * because we don't want to map the entire memory in QEMU.
1827 * In that case just map the requested area.
1828 */
1829 if (block->offset == 0) {
1830 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001831 }
1832
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001833 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001834 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001835
1836 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001837}
1838
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001839/*
1840 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1841 * in that RAMBlock.
1842 *
1843 * ptr: Host pointer to look up
1844 * round_offset: If true round the result offset down to a page boundary
1845 * *ram_addr: set to result ram_addr
1846 * *offset: set to result offset within the RAMBlock
1847 *
1848 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001849 *
1850 * By the time this function returns, the returned pointer is not protected
1851 * by RCU anymore. If the caller is not within an RCU critical section and
1852 * does not hold the iothread lock, it must have other means of protecting the
1853 * pointer, such as a reference to the region that includes the incoming
1854 * ram_addr_t.
1855 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001856RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1857 ram_addr_t *ram_addr,
1858 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001859{
pbrook94a6b542009-04-11 17:15:54 +00001860 RAMBlock *block;
1861 uint8_t *host = ptr;
1862
Jan Kiszka868bb332011-06-21 22:59:09 +02001863 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001864 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001865 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001866 block = qemu_get_ram_block(*ram_addr);
1867 if (block) {
1868 *offset = (host - block->host);
1869 }
Mike Day0dc3f442013-09-05 14:41:35 -04001870 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001871 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001872 }
1873
Mike Day0dc3f442013-09-05 14:41:35 -04001874 rcu_read_lock();
1875 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001876 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001877 goto found;
1878 }
1879
Mike Day0dc3f442013-09-05 14:41:35 -04001880 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001881 /* This case happens when the block is not mapped. */
1882 if (block->host == NULL) {
1883 continue;
1884 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001885 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001886 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001887 }
pbrook94a6b542009-04-11 17:15:54 +00001888 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001889
Mike Day0dc3f442013-09-05 14:41:35 -04001890 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001891 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001892
1893found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001894 *offset = (host - block->host);
1895 if (round_offset) {
1896 *offset &= TARGET_PAGE_MASK;
1897 }
1898 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001899 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001900 return block;
1901}
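/*
 * Illustrative sketch (not part of the original file): translating a host
 * pointer (for example one handed out by address_space_map) back to its
 * RAMBlock and ram_addr_t, the way migration code does.  The helper name is
 * hypothetical.
 */
static inline bool example_host_ptr_to_ram_addr(void *host,
                                                ram_addr_t *ram_addr)
{
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host, false, ram_addr, &offset);

    /* NULL means the pointer does not point into guest RAM at all. */
    return rb != NULL;
}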
1902
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001903/*
1904 * Finds the named RAMBlock
1905 *
1906 * name: The name of RAMBlock to find
1907 *
1908 * Returns: RAMBlock (or NULL if not found)
1909 */
1910RAMBlock *qemu_ram_block_by_name(const char *name)
1911{
1912 RAMBlock *block;
1913
1914 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1915 if (!strcmp(name, block->idstr)) {
1916 return block;
1917 }
1918 }
1919
1920 return NULL;
1921}
1922
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001923/* Some of the softmmu routines need to translate from a host pointer
1924 (typically a TLB entry) back to a ram offset. */
1925MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1926{
1927 RAMBlock *block;
1928 ram_addr_t offset; /* Not used */
1929
1930 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1931
1932 if (!block) {
1933 return NULL;
1934 }
1935
1936 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001937}
Alex Williamsonf471a172010-06-11 11:11:42 -06001938
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001939/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001940static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001941 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001942{
Juan Quintela52159192013-10-08 12:44:04 +02001943 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001944 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001945 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001946 switch (size) {
1947 case 1:
1948 stb_p(qemu_get_ram_ptr(ram_addr), val);
1949 break;
1950 case 2:
1951 stw_p(qemu_get_ram_ptr(ram_addr), val);
1952 break;
1953 case 4:
1954 stl_p(qemu_get_ram_ptr(ram_addr), val);
1955 break;
1956 default:
1957 abort();
1958 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001959 /* Set both VGA and migration bits for simplicity and to remove
1960 * the notdirty callback faster.
1961 */
1962 cpu_physical_memory_set_dirty_range(ram_addr, size,
1963 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001964 /* we remove the notdirty callback only if the code has been
1965 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001966 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001967 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001968 }
bellard1ccde1c2004-02-06 19:46:14 +00001969}
1970
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001971static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1972 unsigned size, bool is_write)
1973{
1974 return is_write;
1975}
1976
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001977static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001978 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001979 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001980 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001981};
1982
pbrook0f459d12008-06-09 00:20:13 +00001983/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001984static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001985{
Andreas Färber93afead2013-08-26 03:41:01 +02001986 CPUState *cpu = current_cpu;
1987 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001988 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001989 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001990 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001991 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001992
Andreas Färberff4700b2013-08-26 18:23:18 +02001993 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001994 /* We re-entered the check after replacing the TB. Now raise
1995 * the debug interrupt so that it will trigger after the
1996 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001997 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001998 return;
1999 }
Andreas Färber93afead2013-08-26 03:41:01 +02002000 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002001 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002002 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2003 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002004 if (flags == BP_MEM_READ) {
2005 wp->flags |= BP_WATCHPOINT_HIT_READ;
2006 } else {
2007 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2008 }
2009 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002010 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002011 if (!cpu->watchpoint_hit) {
2012 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002013 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002014 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002015 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002016 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002017 } else {
2018 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002019 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002020 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002021 }
aliguori06d55cc2008-11-18 20:24:06 +00002022 }
aliguori6e140f22008-11-18 20:37:55 +00002023 } else {
2024 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002025 }
2026 }
2027}
2028
pbrook6658ffb2007-03-16 23:58:11 +00002029/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2030 so these check for a hit then pass through to the normal out-of-line
2031 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002032static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2033 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002034{
Peter Maydell66b9b432015-04-26 16:49:24 +01002035 MemTxResult res;
2036 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002037
Peter Maydell66b9b432015-04-26 16:49:24 +01002038 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002039 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002040 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002041 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002042 break;
2043 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002044 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002045 break;
2046 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002047 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002048 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002049 default: abort();
2050 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002051 *pdata = data;
2052 return res;
2053}
2054
2055static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2056 uint64_t val, unsigned size,
2057 MemTxAttrs attrs)
2058{
2059 MemTxResult res;
2060
2061 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2062 switch (size) {
2063 case 1:
2064 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2065 break;
2066 case 2:
2067 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2068 break;
2069 case 4:
2070 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2071 break;
2072 default: abort();
2073 }
2074 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002075}
2076
Avi Kivity1ec9b902012-01-02 12:47:48 +02002077static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002078 .read_with_attrs = watch_mem_read,
2079 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002080 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002081};
pbrook6658ffb2007-03-16 23:58:11 +00002082
Peter Maydellf25a49e2015-04-26 16:49:24 +01002083static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2084 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002085{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002086 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002087 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002088 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002089
blueswir1db7b5422007-05-26 17:36:03 +00002090#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002091 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002092 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002093#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002094 res = address_space_read(subpage->as, addr + subpage->base,
2095 attrs, buf, len);
2096 if (res) {
2097 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002098 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002099 switch (len) {
2100 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002101 *data = ldub_p(buf);
2102 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002103 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002104 *data = lduw_p(buf);
2105 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002106 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002107 *data = ldl_p(buf);
2108 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002109 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002110 *data = ldq_p(buf);
2111 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002112 default:
2113 abort();
2114 }
blueswir1db7b5422007-05-26 17:36:03 +00002115}
2116
Peter Maydellf25a49e2015-04-26 16:49:24 +01002117static MemTxResult subpage_write(void *opaque, hwaddr addr,
2118 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002119{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002120 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002121 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002122
blueswir1db7b5422007-05-26 17:36:03 +00002123#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002124 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002125 " value %"PRIx64"\n",
2126 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002127#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002128 switch (len) {
2129 case 1:
2130 stb_p(buf, value);
2131 break;
2132 case 2:
2133 stw_p(buf, value);
2134 break;
2135 case 4:
2136 stl_p(buf, value);
2137 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002138 case 8:
2139 stq_p(buf, value);
2140 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002141 default:
2142 abort();
2143 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002144 return address_space_write(subpage->as, addr + subpage->base,
2145 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002146}
2147
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002148static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002149 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002150{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002151 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002152#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002153 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002154 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002155#endif
2156
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002157 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002158 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002159}
2160
Avi Kivity70c68e42012-01-02 12:32:48 +02002161static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002162 .read_with_attrs = subpage_read,
2163 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002164 .impl.min_access_size = 1,
2165 .impl.max_access_size = 8,
2166 .valid.min_access_size = 1,
2167 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002168 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002169 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002170};
2171
Anthony Liguoric227f092009-10-01 16:12:16 -05002172static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002173 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002174{
2175 int idx, eidx;
2176
2177 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2178 return -1;
2179 idx = SUBPAGE_IDX(start);
2180 eidx = SUBPAGE_IDX(end);
2181#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002182 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2183 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002184#endif
blueswir1db7b5422007-05-26 17:36:03 +00002185 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002186 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002187 }
2188
2189 return 0;
2190}
2191
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002192static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002193{
Anthony Liguoric227f092009-10-01 16:12:16 -05002194 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002195
Anthony Liguori7267c092011-08-20 22:09:37 -05002196 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002197
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002198 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002199 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002200 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002201 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002202 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002203#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002204 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2205 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002206#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002207 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002208
2209 return mmio;
2210}
2211
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002212static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2213 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002214{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002215 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002216 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002217 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002218 .mr = mr,
2219 .offset_within_address_space = 0,
2220 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002221 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002222 };
2223
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002224 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002225}
2226
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002227MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002228{
Peter Maydell32857f42015-10-01 15:29:50 +01002229 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2230 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002231 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002232
2233 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002234}
2235
Avi Kivitye9179ce2009-06-14 11:38:52 +03002236static void io_mem_init(void)
2237{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002238 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002239 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002240 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002241 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002242 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002243 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002244 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002245}
2246
Avi Kivityac1970f2012-10-03 16:22:53 +02002247static void mem_begin(MemoryListener *listener)
2248{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002249 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002250 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2251 uint16_t n;
2252
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002253 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002254 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002255 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002256 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002257 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002258 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002259 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002260 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002261
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002262 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002263 d->as = as;
2264 as->next_dispatch = d;
2265}
2266
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002267static void address_space_dispatch_free(AddressSpaceDispatch *d)
2268{
2269 phys_sections_free(&d->map);
2270 g_free(d);
2271}
2272
Paolo Bonzini00752702013-05-29 12:13:54 +02002273static void mem_commit(MemoryListener *listener)
2274{
2275 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002276 AddressSpaceDispatch *cur = as->dispatch;
2277 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002278
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002279 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002280
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002281 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002282 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002283 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002284 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002285}
2286
Avi Kivity1d711482012-10-02 18:54:45 +02002287static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002288{
Peter Maydell32857f42015-10-01 15:29:50 +01002289 CPUAddressSpace *cpuas;
2290 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002291
2292 /* since each CPU stores ram addresses in its TLB cache, we must
2293 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002294 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2295 cpu_reloading_memory_map();
2296 /* The CPU and TLB are protected by the iothread lock.
2297 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2298 * may have split the RCU critical section.
2299 */
2300 d = atomic_rcu_read(&cpuas->as->dispatch);
2301 cpuas->memory_dispatch = d;
2302 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002303}
2304
Avi Kivityac1970f2012-10-03 16:22:53 +02002305void address_space_init_dispatch(AddressSpace *as)
2306{
Paolo Bonzini00752702013-05-29 12:13:54 +02002307 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002308 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002309 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002310 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002311 .region_add = mem_add,
2312 .region_nop = mem_add,
2313 .priority = 0,
2314 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002315 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002316}
2317
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002318void address_space_unregister(AddressSpace *as)
2319{
2320 memory_listener_unregister(&as->dispatch_listener);
2321}
2322
Avi Kivity83f3c252012-10-07 12:59:55 +02002323void address_space_destroy_dispatch(AddressSpace *as)
2324{
2325 AddressSpaceDispatch *d = as->dispatch;
2326
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002327 atomic_rcu_set(&as->dispatch, NULL);
2328 if (d) {
2329 call_rcu(d, address_space_dispatch_free, rcu);
2330 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002331}
2332
Avi Kivity62152b82011-07-26 14:26:14 +03002333static void memory_map_init(void)
2334{
Anthony Liguori7267c092011-08-20 22:09:37 -05002335 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002336
Paolo Bonzini57271d62013-11-07 17:14:37 +01002337 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002338 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002339
Anthony Liguori7267c092011-08-20 22:09:37 -05002340 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002341 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2342 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002343 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002344}
2345
2346MemoryRegion *get_system_memory(void)
2347{
2348 return system_memory;
2349}
2350
Avi Kivity309cb472011-08-08 16:09:03 +03002351MemoryRegion *get_system_io(void)
2352{
2353 return system_io;
2354}
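/*
 * Usage sketch (illustrative only): how board code typically hangs a RAM
 * region off the root region returned by get_system_memory().  The region
 * name, size and base address are made-up example values, and the sketch
 * assumes the usual memory API and &error_abort declarations are in scope.
 */
static void example_map_ram(void)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", 64 * 1024 * 1024,
                           &error_abort);
    /* Make the RAM visible at guest physical address 0x40000000. */
    memory_region_add_subregion(sysmem, 0x40000000, ram);
}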
2355
pbrooke2eef172008-06-08 01:09:01 +00002356#endif /* !defined(CONFIG_USER_ONLY) */
2357
bellard13eb76e2004-01-24 15:23:36 +00002358/* physical memory access (slow version, mainly for debug) */
2359#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002360int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002361 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002362{
2363 int l, flags;
2364 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002365 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002366
2367 while (len > 0) {
2368 page = addr & TARGET_PAGE_MASK;
2369 l = (page + TARGET_PAGE_SIZE) - addr;
2370 if (l > len)
2371 l = len;
2372 flags = page_get_flags(page);
2373 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002374 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002375 if (is_write) {
2376 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002377 return -1;
bellard579a97f2007-11-11 14:26:47 +00002378 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002379 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002380 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002381 memcpy(p, buf, l);
2382 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002383 } else {
2384 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002385 return -1;
bellard579a97f2007-11-11 14:26:47 +00002386 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002387 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002388 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002389 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002390 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002391 }
2392 len -= l;
2393 buf += l;
2394 addr += l;
2395 }
Paul Brooka68fe892010-03-01 00:08:59 +00002396 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002397}
bellard8df1cd02005-01-28 22:37:22 +00002398
bellard13eb76e2004-01-24 15:23:36 +00002399#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002400
Paolo Bonzini845b6212015-03-23 11:45:53 +01002401static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002402 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002403{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002404 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2405 /* No early return if dirty_log_mask is or becomes 0, because
2406 * cpu_physical_memory_set_dirty_range will still call
2407 * xen_modified_memory.
2408 */
2409 if (dirty_log_mask) {
2410 dirty_log_mask =
2411 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002412 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002413 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2414 tb_invalidate_phys_range(addr, addr + length);
2415 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2416 }
2417 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002418}
2419
Richard Henderson23326162013-07-08 14:55:59 -07002420static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002421{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002422 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002423
2424 /* Regions are assumed to support 1-4 byte accesses unless
2425 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002426 if (access_size_max == 0) {
2427 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002428 }
Richard Henderson23326162013-07-08 14:55:59 -07002429
2430 /* Bound the maximum access by the alignment of the address. */
2431 if (!mr->ops->impl.unaligned) {
2432 unsigned align_size_max = addr & -addr;
2433 if (align_size_max != 0 && align_size_max < access_size_max) {
2434 access_size_max = align_size_max;
2435 }
2436 }
2437
2438 /* Don't attempt accesses larger than the maximum. */
2439 if (l > access_size_max) {
2440 l = access_size_max;
2441 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002442 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002443
2444 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002445}
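/*
 * Worked example for the clamping above (hypothetical numbers): an 8-byte
 * access at addr = 0x1006 to a region that declares neither
 * valid.max_access_size nor impl.unaligned gives
 *   access_size_max = 4                       (the default assumed above)
 *   align_size_max  = 0x1006 & -0x1006 = 2    (address aligned only to 2)
 *   l               = pow2floor(MIN(8, 2)) = 2
 * so the dispatch loop issues a 2-byte access first and comes back for the
 * remaining bytes.
 */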
2446
Jan Kiszka4840f102015-06-18 18:47:22 +02002447static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002448{
Jan Kiszka4840f102015-06-18 18:47:22 +02002449 bool unlocked = !qemu_mutex_iothread_locked();
2450 bool release_lock = false;
2451
2452 if (unlocked && mr->global_locking) {
2453 qemu_mutex_lock_iothread();
2454 unlocked = false;
2455 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002456 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002457 if (mr->flush_coalesced_mmio) {
2458 if (unlocked) {
2459 qemu_mutex_lock_iothread();
2460 }
2461 qemu_flush_coalesced_mmio_buffer();
2462 if (unlocked) {
2463 qemu_mutex_unlock_iothread();
2464 }
2465 }
2466
2467 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002468}
2469
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002470/* Called within RCU critical section. */
2471static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2472 MemTxAttrs attrs,
2473 const uint8_t *buf,
2474 int len, hwaddr addr1,
2475 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002476{
bellard13eb76e2004-01-24 15:23:36 +00002477 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002478 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002479 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002480 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002481
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002482 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002483 if (!memory_access_is_direct(mr, true)) {
2484 release_lock |= prepare_mmio_access(mr);
2485 l = memory_access_size(mr, l, addr1);
2486 /* XXX: could force current_cpu to NULL to avoid
2487 potential bugs */
2488 switch (l) {
2489 case 8:
2490 /* 64 bit write access */
2491 val = ldq_p(buf);
2492 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2493 attrs);
2494 break;
2495 case 4:
2496 /* 32 bit write access */
2497 val = ldl_p(buf);
2498 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2499 attrs);
2500 break;
2501 case 2:
2502 /* 16 bit write access */
2503 val = lduw_p(buf);
2504 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2505 attrs);
2506 break;
2507 case 1:
2508 /* 8 bit write access */
2509 val = ldub_p(buf);
2510 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2511 attrs);
2512 break;
2513 default:
2514 abort();
bellard13eb76e2004-01-24 15:23:36 +00002515 }
2516 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002517 addr1 += memory_region_get_ram_addr(mr);
2518 /* RAM case */
2519 ptr = qemu_get_ram_ptr(addr1);
2520 memcpy(ptr, buf, l);
2521 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002522 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002523
2524 if (release_lock) {
2525 qemu_mutex_unlock_iothread();
2526 release_lock = false;
2527 }
2528
bellard13eb76e2004-01-24 15:23:36 +00002529 len -= l;
2530 buf += l;
2531 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002532
2533 if (!len) {
2534 break;
2535 }
2536
2537 l = len;
2538 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002539 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002540
Peter Maydell3b643492015-04-26 16:49:23 +01002541 return result;
bellard13eb76e2004-01-24 15:23:36 +00002542}
bellard8df1cd02005-01-28 22:37:22 +00002543
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002544MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2545 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002546{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002547 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002548 hwaddr addr1;
2549 MemoryRegion *mr;
2550 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002551
2552 if (len > 0) {
2553 rcu_read_lock();
2554 l = len;
2555 mr = address_space_translate(as, addr, &addr1, &l, true);
2556 result = address_space_write_continue(as, addr, attrs, buf, len,
2557 addr1, l, mr);
2558 rcu_read_unlock();
2559 }
2560
2561 return result;
2562}
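/*
 * Usage sketch (illustrative only): a DMA-style write into guest memory
 * with the transaction result checked.  The address and buffer are
 * hypothetical; a real bus master would pass its own MemTxAttrs rather
 * than MEMTXATTRS_UNSPECIFIED when it has them.
 */
static void example_dma_write(AddressSpace *as)
{
    uint8_t data[128] = { 0 };

    if (address_space_write(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
                            data, sizeof(data)) != MEMTX_OK) {
        /* A device model would typically report an error to the guest here. */
    }
}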
2563
2564/* Called within RCU critical section. */
2565MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2566 MemTxAttrs attrs, uint8_t *buf,
2567 int len, hwaddr addr1, hwaddr l,
2568 MemoryRegion *mr)
2569{
2570 uint8_t *ptr;
2571 uint64_t val;
2572 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002573 bool release_lock = false;
2574
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002575 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002576 if (!memory_access_is_direct(mr, false)) {
2577 /* I/O case */
2578 release_lock |= prepare_mmio_access(mr);
2579 l = memory_access_size(mr, l, addr1);
2580 switch (l) {
2581 case 8:
2582 /* 64 bit read access */
2583 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2584 attrs);
2585 stq_p(buf, val);
2586 break;
2587 case 4:
2588 /* 32 bit read access */
2589 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2590 attrs);
2591 stl_p(buf, val);
2592 break;
2593 case 2:
2594 /* 16 bit read access */
2595 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2596 attrs);
2597 stw_p(buf, val);
2598 break;
2599 case 1:
2600 /* 8 bit read access */
2601 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2602 attrs);
2603 stb_p(buf, val);
2604 break;
2605 default:
2606 abort();
2607 }
2608 } else {
2609 /* RAM case */
2610 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2611 memcpy(buf, ptr, l);
2612 }
2613
2614 if (release_lock) {
2615 qemu_mutex_unlock_iothread();
2616 release_lock = false;
2617 }
2618
2619 len -= l;
2620 buf += l;
2621 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002622
2623 if (!len) {
2624 break;
2625 }
2626
2627 l = len;
2628 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002629 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002630
2631 return result;
2632}
2633
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002634MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2635 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002636{
2637 hwaddr l;
2638 hwaddr addr1;
2639 MemoryRegion *mr;
2640 MemTxResult result = MEMTX_OK;
2641
2642 if (len > 0) {
2643 rcu_read_lock();
2644 l = len;
2645 mr = address_space_translate(as, addr, &addr1, &l, false);
2646 result = address_space_read_continue(as, addr, attrs, buf, len,
2647 addr1, l, mr);
2648 rcu_read_unlock();
2649 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002650
2651 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002652}
2653
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002654MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2655 uint8_t *buf, int len, bool is_write)
2656{
2657 if (is_write) {
2658 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2659 } else {
2660 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2661 }
2662}
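/*
 * Usage sketch (illustrative only): reading guest memory through the same
 * entry point, with is_write selecting the direction.  The helpers above
 * take the RCU read lock themselves, so no extra locking is needed here.
 */
static uint32_t example_read_word(AddressSpace *as, hwaddr addr)
{
    uint32_t val = 0;

    address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                     (uint8_t *)&val, sizeof(val), false);
    return val;   /* raw guest bytes in memory order, not byte-swapped */
}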
Avi Kivityac1970f2012-10-03 16:22:53 +02002663
Avi Kivitya8170e52012-10-23 12:30:10 +02002664void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002665 int len, int is_write)
2666{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002667 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2668 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002669}
2670
Alexander Graf582b55a2013-12-11 14:17:44 +01002671enum write_rom_type {
2672 WRITE_DATA,
2673 FLUSH_CACHE,
2674};
2675
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002676static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002677 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002678{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002679 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002680 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002681 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002682 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002683
Paolo Bonzini41063e12015-03-18 14:21:43 +01002684 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002685 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002686 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002687 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002688
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002689 if (!(memory_region_is_ram(mr) ||
2690 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002691 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002692 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002693 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002694 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002695 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002696 switch (type) {
2697 case WRITE_DATA:
2698 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002699 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002700 break;
2701 case FLUSH_CACHE:
2702 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2703 break;
2704 }
bellardd0ecd2a2006-04-23 17:14:48 +00002705 }
2706 len -= l;
2707 buf += l;
2708 addr += l;
2709 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002710 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002711}
2712
Alexander Graf582b55a2013-12-11 14:17:44 +01002713/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002714void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002715 const uint8_t *buf, int len)
2716{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002717 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002718}
2719
2720void cpu_flush_icache_range(hwaddr start, int len)
2721{
2722 /*
2723 * This function should do the same thing as an icache flush that was
2724 * triggered from within the guest. For TCG we are always cache coherent,
2725 * so there is no need to flush anything. For KVM / Xen we need to flush
2726 * the host's instruction cache at least.
2727 */
2728 if (tcg_enabled()) {
2729 return;
2730 }
2731
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002732 cpu_physical_memory_write_rom_internal(&address_space_memory,
2733 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002734}
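/*
 * Usage sketch (illustrative only): a firmware loader patching guest code.
 * cpu_physical_memory_write_rom() also writes through ROM/ROMD regions,
 * and the explicit icache flush matters on KVM/Xen hosts, as explained
 * above.  The address and payload are hypothetical.
 */
static void example_patch_reset_vector(AddressSpace *as)
{
    static const uint8_t payload[16] = { 0 };

    cpu_physical_memory_write_rom(as, 0xfffc0000, payload, sizeof(payload));
    cpu_flush_icache_range(0xfffc0000, sizeof(payload));
}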
2735
aliguori6d16c2f2009-01-22 16:59:11 +00002736typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002737 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002738 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002739 hwaddr addr;
2740 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002741 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002742} BounceBuffer;
2743
2744static BounceBuffer bounce;
2745
aliguoriba223c22009-01-22 16:59:16 +00002746typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002747 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002748 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002749} MapClient;
2750
Fam Zheng38e047b2015-03-16 17:03:35 +08002751QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002752static QLIST_HEAD(map_client_list, MapClient) map_client_list
2753 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002754
Fam Zhenge95205e2015-03-16 17:03:37 +08002755static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002756{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002757 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002758 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002759}
2760
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002761static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002762{
2763 MapClient *client;
2764
Blue Swirl72cf2d42009-09-12 07:36:22 +00002765 while (!QLIST_EMPTY(&map_client_list)) {
2766 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002767 qemu_bh_schedule(client->bh);
2768 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002769 }
2770}
2771
Fam Zhenge95205e2015-03-16 17:03:37 +08002772void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002773{
2774 MapClient *client = g_malloc(sizeof(*client));
2775
Fam Zheng38e047b2015-03-16 17:03:35 +08002776 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002777 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002778 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002779 if (!atomic_read(&bounce.in_use)) {
2780 cpu_notify_map_clients_locked();
2781 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002782 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002783}
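/*
 * Usage sketch (illustrative only): how a DMA helper retries when the
 * single bounce buffer is in use.  The struct, bottom half and callback
 * names are hypothetical; the pattern mirrors the one used by
 * dma-helpers.c with cpu_register_map_client()/cpu_unregister_map_client().
 */
typedef struct ExampleDMAState {
    QEMUBH *retry_bh;
} ExampleDMAState;

static void example_dma_retry(void *opaque)
{
    ExampleDMAState *s = opaque;

    /* Runs once a mapping is likely to succeed again. */
    cpu_unregister_map_client(s->retry_bh);
    /* ... re-issue the address_space_map() call that failed ... */
}

static void example_dma_map_failed(ExampleDMAState *s)
{
    if (!s->retry_bh) {
        s->retry_bh = qemu_bh_new(example_dma_retry, s);
    }
    cpu_register_map_client(s->retry_bh);
}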
2784
Fam Zheng38e047b2015-03-16 17:03:35 +08002785void cpu_exec_init_all(void)
2786{
2787 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002788 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002789 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002790 qemu_mutex_init(&map_client_list_lock);
2791}
2792
Fam Zhenge95205e2015-03-16 17:03:37 +08002793void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002794{
Fam Zhenge95205e2015-03-16 17:03:37 +08002795 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002796
Fam Zhenge95205e2015-03-16 17:03:37 +08002797 qemu_mutex_lock(&map_client_list_lock);
2798 QLIST_FOREACH(client, &map_client_list, link) {
2799 if (client->bh == bh) {
2800 cpu_unregister_map_client_do(client);
2801 break;
2802 }
2803 }
2804 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002805}
2806
2807static void cpu_notify_map_clients(void)
2808{
Fam Zheng38e047b2015-03-16 17:03:35 +08002809 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002810 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002811 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002812}
2813
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002814bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2815{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002816 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002817 hwaddr l, xlat;
2818
Paolo Bonzini41063e12015-03-18 14:21:43 +01002819 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002820 while (len > 0) {
2821 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002822 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2823 if (!memory_access_is_direct(mr, is_write)) {
2824 l = memory_access_size(mr, l, addr);
2825 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002826 return false;
2827 }
2828 }
2829
2830 len -= l;
2831 addr += l;
2832 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002833 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002834 return true;
2835}
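/*
 * Usage sketch (illustrative only): probing a guest buffer up front so a
 * request can be failed cleanly instead of half-way through a transfer.
 * The buffer address and length come from a hypothetical guest request.
 */
static bool example_start_request(AddressSpace *as, hwaddr buf, int len)
{
    if (!address_space_access_valid(as, buf, len, true)) {
        return false;     /* report an error to the guest driver */
    }
    /* ... safe to start writing the response into [buf, buf + len) ... */
    return true;
}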
2836
aliguori6d16c2f2009-01-22 16:59:11 +00002837/* Map a physical memory region into a host virtual address.
2838 * May map a subset of the requested range, given by and returned in *plen.
2839 * May return NULL if resources needed to perform the mapping are exhausted.
2840 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002841 * Use cpu_register_map_client() to know when retrying the map operation is
2842 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002843 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002844void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002845 hwaddr addr,
2846 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002847 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002848{
Avi Kivitya8170e52012-10-23 12:30:10 +02002849 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002850 hwaddr done = 0;
2851 hwaddr l, xlat, base;
2852 MemoryRegion *mr, *this_mr;
2853 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002854 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002855
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002856 if (len == 0) {
2857 return NULL;
2858 }
aliguori6d16c2f2009-01-22 16:59:11 +00002859
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002860 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002861 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002862 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002863
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002864 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002865 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002866 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002867 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002868 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002869 /* Avoid unbounded allocations */
2870 l = MIN(l, TARGET_PAGE_SIZE);
2871 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002872 bounce.addr = addr;
2873 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002874
2875 memory_region_ref(mr);
2876 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002877 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002878 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2879 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002880 }
aliguori6d16c2f2009-01-22 16:59:11 +00002881
Paolo Bonzini41063e12015-03-18 14:21:43 +01002882 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002883 *plen = l;
2884 return bounce.buffer;
2885 }
2886
2887 base = xlat;
2888 raddr = memory_region_get_ram_addr(mr);
2889
2890 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002891 len -= l;
2892 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002893 done += l;
2894 if (len == 0) {
2895 break;
2896 }
2897
2898 l = len;
2899 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2900 if (this_mr != mr || xlat != base + done) {
2901 break;
2902 }
aliguori6d16c2f2009-01-22 16:59:11 +00002903 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002904
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002905 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002906 *plen = done;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002907 ptr = qemu_ram_ptr_length(raddr + base, plen);
2908 rcu_read_unlock();
2909
2910 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002911}
2912
Avi Kivityac1970f2012-10-03 16:22:53 +02002913/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002914 * Will also mark the memory as dirty if is_write == 1. access_len gives
2915 * the amount of memory that was actually read or written by the caller.
2916 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002917void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2918 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002919{
2920 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002921 MemoryRegion *mr;
2922 ram_addr_t addr1;
2923
2924 mr = qemu_ram_addr_from_host(buffer, &addr1);
2925 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002926 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002927 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002928 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002929 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002930 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002931 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002932 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002933 return;
2934 }
2935 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002936 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2937 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002938 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002939 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002940 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002941 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002942 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002943 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002944}
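/*
 * Usage sketch (illustrative only) of the zero-copy pattern the two
 * functions above provide.  When the target is not direct RAM the mapping
 * may come back shorter than requested (the bounce buffer is one page) or
 * NULL, so real callers loop or fall back to address_space_rw(); this
 * sketch only handles the simple case.
 */
static void example_fill_guest_buffer(AddressSpace *as, hwaddr gpa, hwaddr len)
{
    hwaddr maplen = len;
    void *host = address_space_map(as, gpa, &maplen, true);

    if (!host) {
        return;                  /* resources exhausted; retry via map client */
    }
    memset(host, 0, maplen);     /* only maplen bytes are actually mapped */
    address_space_unmap(as, host, maplen, true, maplen);
}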
bellardd0ecd2a2006-04-23 17:14:48 +00002945
Avi Kivitya8170e52012-10-23 12:30:10 +02002946void *cpu_physical_memory_map(hwaddr addr,
2947 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002948 int is_write)
2949{
2950 return address_space_map(&address_space_memory, addr, plen, is_write);
2951}
2952
Avi Kivitya8170e52012-10-23 12:30:10 +02002953void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2954 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002955{
2956 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2957}
2958
bellard8df1cd02005-01-28 22:37:22 +00002959/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002960static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2961 MemTxAttrs attrs,
2962 MemTxResult *result,
2963 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002964{
bellard8df1cd02005-01-28 22:37:22 +00002965 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002966 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002967 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002968 hwaddr l = 4;
2969 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002970 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002971 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002972
Paolo Bonzini41063e12015-03-18 14:21:43 +01002973 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002974 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002975 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002976 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002977
bellard8df1cd02005-01-28 22:37:22 +00002978 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002979 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002980#if defined(TARGET_WORDS_BIGENDIAN)
2981 if (endian == DEVICE_LITTLE_ENDIAN) {
2982 val = bswap32(val);
2983 }
2984#else
2985 if (endian == DEVICE_BIG_ENDIAN) {
2986 val = bswap32(val);
2987 }
2988#endif
bellard8df1cd02005-01-28 22:37:22 +00002989 } else {
2990 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002991 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002992 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002993 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002994 switch (endian) {
2995 case DEVICE_LITTLE_ENDIAN:
2996 val = ldl_le_p(ptr);
2997 break;
2998 case DEVICE_BIG_ENDIAN:
2999 val = ldl_be_p(ptr);
3000 break;
3001 default:
3002 val = ldl_p(ptr);
3003 break;
3004 }
Peter Maydell50013112015-04-26 16:49:24 +01003005 r = MEMTX_OK;
3006 }
3007 if (result) {
3008 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003009 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003010 if (release_lock) {
3011 qemu_mutex_unlock_iothread();
3012 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003013 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003014 return val;
3015}
3016
Peter Maydell50013112015-04-26 16:49:24 +01003017uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3018 MemTxAttrs attrs, MemTxResult *result)
3019{
3020 return address_space_ldl_internal(as, addr, attrs, result,
3021 DEVICE_NATIVE_ENDIAN);
3022}
3023
3024uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3025 MemTxAttrs attrs, MemTxResult *result)
3026{
3027 return address_space_ldl_internal(as, addr, attrs, result,
3028 DEVICE_LITTLE_ENDIAN);
3029}
3030
3031uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3032 MemTxAttrs attrs, MemTxResult *result)
3033{
3034 return address_space_ldl_internal(as, addr, attrs, result,
3035 DEVICE_BIG_ENDIAN);
3036}
3037
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003038uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003039{
Peter Maydell50013112015-04-26 16:49:24 +01003040 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003041}
3042
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003043uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003044{
Peter Maydell50013112015-04-26 16:49:24 +01003045 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003046}
3047
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003048uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003049{
Peter Maydell50013112015-04-26 16:49:24 +01003050 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003051}
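/*
 * Usage sketch (illustrative only): reading a little-endian descriptor
 * word with an explicit transaction result, instead of the legacy
 * ldl_*_phys() wrappers above, which discard it.  The descriptor address
 * is hypothetical.
 */
static uint32_t example_read_desc_flags(AddressSpace *as, hwaddr desc)
{
    MemTxResult res;
    uint32_t flags = address_space_ldl_le(as, desc, MEMTXATTRS_UNSPECIFIED,
                                          &res);

    return res == MEMTX_OK ? flags : 0;
}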
3052
bellard84b7b8e2005-11-28 21:19:04 +00003053/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003054static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3055 MemTxAttrs attrs,
3056 MemTxResult *result,
3057 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003058{
bellard84b7b8e2005-11-28 21:19:04 +00003059 uint8_t *ptr;
3060 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003061 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003062 hwaddr l = 8;
3063 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003064 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003065 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003066
Paolo Bonzini41063e12015-03-18 14:21:43 +01003067 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003068 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003069 false);
3070 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003071 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003072
bellard84b7b8e2005-11-28 21:19:04 +00003073 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003074 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003075#if defined(TARGET_WORDS_BIGENDIAN)
3076 if (endian == DEVICE_LITTLE_ENDIAN) {
3077 val = bswap64(val);
3078 }
3079#else
3080 if (endian == DEVICE_BIG_ENDIAN) {
3081 val = bswap64(val);
3082 }
3083#endif
bellard84b7b8e2005-11-28 21:19:04 +00003084 } else {
3085 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003086 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003087 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003088 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003089 switch (endian) {
3090 case DEVICE_LITTLE_ENDIAN:
3091 val = ldq_le_p(ptr);
3092 break;
3093 case DEVICE_BIG_ENDIAN:
3094 val = ldq_be_p(ptr);
3095 break;
3096 default:
3097 val = ldq_p(ptr);
3098 break;
3099 }
Peter Maydell50013112015-04-26 16:49:24 +01003100 r = MEMTX_OK;
3101 }
3102 if (result) {
3103 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003104 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003105 if (release_lock) {
3106 qemu_mutex_unlock_iothread();
3107 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003108 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003109 return val;
3110}
3111
Peter Maydell50013112015-04-26 16:49:24 +01003112uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3113 MemTxAttrs attrs, MemTxResult *result)
3114{
3115 return address_space_ldq_internal(as, addr, attrs, result,
3116 DEVICE_NATIVE_ENDIAN);
3117}
3118
3119uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3120 MemTxAttrs attrs, MemTxResult *result)
3121{
3122 return address_space_ldq_internal(as, addr, attrs, result,
3123 DEVICE_LITTLE_ENDIAN);
3124}
3125
3126uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3127 MemTxAttrs attrs, MemTxResult *result)
3128{
3129 return address_space_ldq_internal(as, addr, attrs, result,
3130 DEVICE_BIG_ENDIAN);
3131}
3132
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003133uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003134{
Peter Maydell50013112015-04-26 16:49:24 +01003135 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003136}
3137
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003138uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003139{
Peter Maydell50013112015-04-26 16:49:24 +01003140 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003141}
3142
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003143uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003144{
Peter Maydell50013112015-04-26 16:49:24 +01003145 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003146}
3147
bellardaab33092005-10-30 20:48:42 +00003148/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003149uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3150 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003151{
3152 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003153 MemTxResult r;
3154
3155 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3156 if (result) {
3157 *result = r;
3158 }
bellardaab33092005-10-30 20:48:42 +00003159 return val;
3160}
3161
Peter Maydell50013112015-04-26 16:49:24 +01003162uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3163{
3164 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3165}
3166
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003167/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003168static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3169 hwaddr addr,
3170 MemTxAttrs attrs,
3171 MemTxResult *result,
3172 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003173{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003174 uint8_t *ptr;
3175 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003176 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003177 hwaddr l = 2;
3178 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003179 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003180 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003181
Paolo Bonzini41063e12015-03-18 14:21:43 +01003182 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003183 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003184 false);
3185 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003186 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003187
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003188 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003189 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003190#if defined(TARGET_WORDS_BIGENDIAN)
3191 if (endian == DEVICE_LITTLE_ENDIAN) {
3192 val = bswap16(val);
3193 }
3194#else
3195 if (endian == DEVICE_BIG_ENDIAN) {
3196 val = bswap16(val);
3197 }
3198#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003199 } else {
3200 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003201 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003202 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003203 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003204 switch (endian) {
3205 case DEVICE_LITTLE_ENDIAN:
3206 val = lduw_le_p(ptr);
3207 break;
3208 case DEVICE_BIG_ENDIAN:
3209 val = lduw_be_p(ptr);
3210 break;
3211 default:
3212 val = lduw_p(ptr);
3213 break;
3214 }
Peter Maydell50013112015-04-26 16:49:24 +01003215 r = MEMTX_OK;
3216 }
3217 if (result) {
3218 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003219 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003220 if (release_lock) {
3221 qemu_mutex_unlock_iothread();
3222 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003223 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003224 return val;
bellardaab33092005-10-30 20:48:42 +00003225}
3226
Peter Maydell50013112015-04-26 16:49:24 +01003227uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3228 MemTxAttrs attrs, MemTxResult *result)
3229{
3230 return address_space_lduw_internal(as, addr, attrs, result,
3231 DEVICE_NATIVE_ENDIAN);
3232}
3233
3234uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3235 MemTxAttrs attrs, MemTxResult *result)
3236{
3237 return address_space_lduw_internal(as, addr, attrs, result,
3238 DEVICE_LITTLE_ENDIAN);
3239}
3240
3241uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3242 MemTxAttrs attrs, MemTxResult *result)
3243{
3244 return address_space_lduw_internal(as, addr, attrs, result,
3245 DEVICE_BIG_ENDIAN);
3246}
3247
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003248uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003249{
Peter Maydell50013112015-04-26 16:49:24 +01003250 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003251}
3252
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003253uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003254{
Peter Maydell50013112015-04-26 16:49:24 +01003255 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003256}
3257
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003258uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003259{
Peter Maydell50013112015-04-26 16:49:24 +01003260 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003261}
3262
bellard8df1cd02005-01-28 22:37:22 +00003263/* warning: addr must be aligned. The ram page is not marked as dirty
3264 and the code inside is not invalidated. It is useful if the dirty
3265 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003266void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3267 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003268{
bellard8df1cd02005-01-28 22:37:22 +00003269 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003270 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003271 hwaddr l = 4;
3272 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003273 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003274 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003275 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003276
Paolo Bonzini41063e12015-03-18 14:21:43 +01003277 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003278 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003279 true);
3280 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003281 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003282
Peter Maydell50013112015-04-26 16:49:24 +01003283 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003284 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003285 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003286 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003287 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003288
Paolo Bonzini845b6212015-03-23 11:45:53 +01003289 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3290 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003291 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003292 r = MEMTX_OK;
3293 }
3294 if (result) {
3295 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003296 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003297 if (release_lock) {
3298 qemu_mutex_unlock_iothread();
3299 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003300 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003301}
3302
Peter Maydell50013112015-04-26 16:49:24 +01003303void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3304{
3305 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3306}
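/*
 * Usage sketch (illustrative only): the notdirty store suits cases such as
 * a software page-table walker setting accessed/dirty bits in a guest PTE,
 * where re-dirtying the page or invalidating its TBs would be wasted work.
 * The PTE address and bit value are hypothetical.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}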
3307
bellard8df1cd02005-01-28 22:37:22 +00003308/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003309static inline void address_space_stl_internal(AddressSpace *as,
3310 hwaddr addr, uint32_t val,
3311 MemTxAttrs attrs,
3312 MemTxResult *result,
3313 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003314{
bellard8df1cd02005-01-28 22:37:22 +00003315 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003316 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003317 hwaddr l = 4;
3318 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003319 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003320 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003321
Paolo Bonzini41063e12015-03-18 14:21:43 +01003322 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003323 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003324 true);
3325 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003326 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003327
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003328#if defined(TARGET_WORDS_BIGENDIAN)
3329 if (endian == DEVICE_LITTLE_ENDIAN) {
3330 val = bswap32(val);
3331 }
3332#else
3333 if (endian == DEVICE_BIG_ENDIAN) {
3334 val = bswap32(val);
3335 }
3336#endif
Peter Maydell50013112015-04-26 16:49:24 +01003337 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003338 } else {
bellard8df1cd02005-01-28 22:37:22 +00003339 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003340 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003341 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003342 switch (endian) {
3343 case DEVICE_LITTLE_ENDIAN:
3344 stl_le_p(ptr, val);
3345 break;
3346 case DEVICE_BIG_ENDIAN:
3347 stl_be_p(ptr, val);
3348 break;
3349 default:
3350 stl_p(ptr, val);
3351 break;
3352 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003353 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003354 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003355 }
Peter Maydell50013112015-04-26 16:49:24 +01003356 if (result) {
3357 *result = r;
3358 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003359 if (release_lock) {
3360 qemu_mutex_unlock_iothread();
3361 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003362 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003363}
3364
3365void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3366 MemTxAttrs attrs, MemTxResult *result)
3367{
3368 address_space_stl_internal(as, addr, val, attrs, result,
3369 DEVICE_NATIVE_ENDIAN);
3370}
3371
3372void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3373 MemTxAttrs attrs, MemTxResult *result)
3374{
3375 address_space_stl_internal(as, addr, val, attrs, result,
3376 DEVICE_LITTLE_ENDIAN);
3377}
3378
3379void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3380 MemTxAttrs attrs, MemTxResult *result)
3381{
3382 address_space_stl_internal(as, addr, val, attrs, result,
3383 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003384}
3385
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003386void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003387{
Peter Maydell50013112015-04-26 16:49:24 +01003388 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003389}
3390
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003391void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003392{
Peter Maydell50013112015-04-26 16:49:24 +01003393 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003394}
3395
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003396void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003397{
Peter Maydell50013112015-04-26 16:49:24 +01003398 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003399}
3400
bellardaab33092005-10-30 20:48:42 +00003401/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003402void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3403 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003404{
3405 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003406 MemTxResult r;
3407
3408 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3409 if (result) {
3410 *result = r;
3411 }
3412}
3413
3414void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3415{
3416 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003417}
3418
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003419/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003420static inline void address_space_stw_internal(AddressSpace *as,
3421 hwaddr addr, uint32_t val,
3422 MemTxAttrs attrs,
3423 MemTxResult *result,
3424 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003425{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003426 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003427 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003428 hwaddr l = 2;
3429 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003430 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003431 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003432
Paolo Bonzini41063e12015-03-18 14:21:43 +01003433 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003434 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003435 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003436 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003437
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003438#if defined(TARGET_WORDS_BIGENDIAN)
3439 if (endian == DEVICE_LITTLE_ENDIAN) {
3440 val = bswap16(val);
3441 }
3442#else
3443 if (endian == DEVICE_BIG_ENDIAN) {
3444 val = bswap16(val);
3445 }
3446#endif
Peter Maydell50013112015-04-26 16:49:24 +01003447 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003448 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003449 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003450 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003451 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003452 switch (endian) {
3453 case DEVICE_LITTLE_ENDIAN:
3454 stw_le_p(ptr, val);
3455 break;
3456 case DEVICE_BIG_ENDIAN:
3457 stw_be_p(ptr, val);
3458 break;
3459 default:
3460 stw_p(ptr, val);
3461 break;
3462 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003463 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003464 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003465 }
Peter Maydell50013112015-04-26 16:49:24 +01003466 if (result) {
3467 *result = r;
3468 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003469 if (release_lock) {
3470 qemu_mutex_unlock_iothread();
3471 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003472 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003473}
3474
3475void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3476 MemTxAttrs attrs, MemTxResult *result)
3477{
3478 address_space_stw_internal(as, addr, val, attrs, result,
3479 DEVICE_NATIVE_ENDIAN);
3480}
3481
3482void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3483 MemTxAttrs attrs, MemTxResult *result)
3484{
3485 address_space_stw_internal(as, addr, val, attrs, result,
3486 DEVICE_LITTLE_ENDIAN);
3487}
3488
3489void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3490 MemTxAttrs attrs, MemTxResult *result)
3491{
3492 address_space_stw_internal(as, addr, val, attrs, result,
3493 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003494}
3495
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003496void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003497{
Peter Maydell50013112015-04-26 16:49:24 +01003498 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003499}
3500
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003501void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003502{
Peter Maydell50013112015-04-26 16:49:24 +01003503 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003504}
3505
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003506void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003507{
Peter Maydell50013112015-04-26 16:49:24 +01003508 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003509}
3510
bellardaab33092005-10-30 20:48:42 +00003511/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003512void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3513 MemTxAttrs attrs, MemTxResult *result)
3514{
3515 MemTxResult r;
3516 val = tswap64(val);
3517 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3518 if (result) {
3519 *result = r;
3520 }
3521}
3522
3523void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3524 MemTxAttrs attrs, MemTxResult *result)
3525{
3526 MemTxResult r;
3527 val = cpu_to_le64(val);
3528 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3529 if (result) {
3530 *result = r;
3531 }
3532}
3533void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3534 MemTxAttrs attrs, MemTxResult *result)
3535{
3536 MemTxResult r;
3537 val = cpu_to_be64(val);
3538 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3539 if (result) {
3540 *result = r;
3541 }
3542}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
3558
aliguori5e2972f2009-03-28 17:51:36 +00003559/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003560int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003561 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003562{
3563 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003564 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003565 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003566
3567 while (len > 0) {
3568 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003569 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003570 /* if no physical page mapped, return an error */
3571 if (phys_addr == -1)
3572 return -1;
3573 l = (page + TARGET_PAGE_SIZE) - addr;
3574 if (l > len)
3575 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003576 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003577 if (is_write) {
3578 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3579 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003580 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3581 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003582 }
bellard13eb76e2004-01-24 15:23:36 +00003583 len -= l;
3584 buf += l;
3585 addr += l;
3586 }
3587 return 0;
3588}
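
/*
 * Illustrative sketch (not part of the original source): the typical
 * gdbstub-style caller reads a small buffer through the CPU's virtual address
 * space and checks for unmapped pages.  "vaddr" is a hypothetical guest
 * virtual address:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, vaddr, insn, sizeof(insn), 0) < 0) {
 *         return -1;   // no physical page mapped at vaddr
 *     }
 */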

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
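
/*
 * Illustrative sketch (not part of the original source): target-independent
 * migration code can derive the guest page size without pulling in target
 * headers.  "ram_bytes" is a hypothetical total RAM size:
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     size_t bitmap_bits = ram_bytes / page_size;
 */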

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
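
/*
 * Illustrative sketch (not part of the original source): legacy virtio code
 * can compare the target's endianness against the host's to decide whether
 * byte swapping is needed:
 *
 *     #ifdef HOST_WORDS_BIGENDIAN
 *     bool host_big = true;
 *     #else
 *     bool host_big = false;
 *     #endif
 *     bool swap_needed = target_words_bigendian() != host_big;
 */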

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
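
/*
 * Illustrative sketch (not part of the original source): a guest memory dumper
 * could use this predicate to skip MMIO regions, which cannot be read back as
 * plain RAM.  "paddr" is a hypothetical guest physical address:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;   // do not dump device registers
 *     }
 */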

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
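
/*
 * Illustrative sketch (not part of the original source): a callback that dumps
 * every RAMBlock; iteration stops early as soon as the callback returns a
 * non-zero value:
 *
 *     static int dump_block(const char *idstr, void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         printf("%s: host %p offset 0x%" PRIx64 " len 0x%" PRIx64 "\n",
 *                idstr, host, (uint64_t)offset, (uint64_t)length);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */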
#endif