blob: d12cd42f4cc16cb3778a75b2c33a47137e78eadb [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
Blue Swirl5b6dd862012-12-02 16:04:43 +00002 * Virtual page mapping
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
Stefan Weil777872e2014-02-23 18:02:08 +010020#ifndef _WIN32
bellarda98d49b2004-11-14 16:22:05 +000021#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000022#include <sys/mman.h>
23#endif
bellard54936002003-05-13 00:25:15 +000024
Stefan Weil055403b2010-10-22 23:03:32 +020025#include "qemu-common.h"
bellard6180a182003-09-30 21:04:53 +000026#include "cpu.h"
bellardb67d9a52008-05-23 09:57:34 +000027#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000028#include "hw/hw.h"
Michael S. Tsirkin4485bd22015-03-11 07:56:34 +010029#if !defined(CONFIG_USER_ONLY)
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +020030#include "hw/boards.h"
Michael S. Tsirkin4485bd22015-03-11 07:56:34 +010031#endif
Alex Williamsoncc9e98c2010-06-25 11:09:43 -060032#include "hw/qdev.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010033#include "qemu/osdep.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010034#include "sysemu/kvm.h"
Markus Armbruster2ff3de62013-07-04 15:09:22 +020035#include "sysemu/sysemu.h"
Paolo Bonzini0d09e412013-02-05 17:06:20 +010036#include "hw/xen/xen.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010037#include "qemu/timer.h"
38#include "qemu/config-file.h"
Andreas Färber75a34032013-09-02 16:57:02 +020039#include "qemu/error-report.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010040#include "exec/memory.h"
Paolo Bonzini9c17d612012-12-17 18:20:04 +010041#include "sysemu/dma.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010042#include "exec/address-spaces.h"
pbrook53a59602006-03-25 19:31:22 +000043#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
Jun Nakajima432d2682010-08-31 16:41:25 +010045#else /* !CONFIG_USER_ONLY */
Paolo Bonzini9c17d612012-12-17 18:20:04 +010046#include "sysemu/xen-mapcache.h"
Stefano Stabellini6506e4f2011-05-19 18:35:44 +010047#include "trace.h"
pbrook53a59602006-03-25 19:31:22 +000048#endif
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +010049#include "exec/cpu-all.h"
Mike Day0dc3f442013-09-05 14:41:35 -040050#include "qemu/rcu_queue.h"
Jan Kiszka4840f102015-06-18 18:47:22 +020051#include "qemu/main-loop.h"
Blue Swirl5b6dd862012-12-02 16:04:43 +000052#include "translate-all.h"
Pavel Dovgalyuk76159362015-09-17 19:25:07 +030053#include "sysemu/replay.h"
Blue Swirl0cac1b62012-04-09 16:50:52 +000054
Paolo Bonzini022c62c2012-12-17 18:19:49 +010055#include "exec/memory-internal.h"
Juan Quintela220c3eb2013-10-14 17:13:59 +020056#include "exec/ram_addr.h"
Avi Kivity67d95c12011-12-15 15:25:22 +020057
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +020058#include "qemu/range.h"
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +030059#ifndef _WIN32
60#include "qemu/mmap-alloc.h"
61#endif
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +020062
blueswir1db7b5422007-05-26 17:36:03 +000063//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000064
pbrook99773bd2006-04-16 15:14:59 +000065#if !defined(CONFIG_USER_ONLY)
Mike Day0dc3f442013-09-05 14:41:35 -040066/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
67 * are protected by the ramlist lock.
68 */
Mike Day0d53d9f2015-01-21 13:45:24 +010069RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
Avi Kivity62152b82011-07-26 14:26:14 +030070
71static MemoryRegion *system_memory;
Avi Kivity309cb472011-08-08 16:09:03 +030072static MemoryRegion *system_io;
Avi Kivity62152b82011-07-26 14:26:14 +030073
Avi Kivityf6790af2012-10-02 20:13:51 +020074AddressSpace address_space_io;
75AddressSpace address_space_memory;
Avi Kivity2673a5d2012-10-02 18:49:28 +020076
Paolo Bonzini0844e002013-05-24 14:37:28 +020077MemoryRegion io_mem_rom, io_mem_notdirty;
Jan Kiszkaacc9d802013-05-26 21:55:37 +020078static MemoryRegion io_mem_unassigned;
Avi Kivity0e0df1e2012-01-02 00:32:15 +020079
Paolo Bonzini7bd4f432014-05-14 17:43:22 +080080/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
81#define RAM_PREALLOC (1 << 0)
82
Paolo Bonzinidbcb8982014-06-10 19:15:24 +080083/* RAM is mmap-ed with MAP_SHARED */
84#define RAM_SHARED (1 << 1)
85
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +020086/* Only a portion of RAM (used_length) is actually used, and migrated.
87 * This used_length size can change across reboots.
88 */
89#define RAM_RESIZEABLE (1 << 2)
90
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +030091/* RAM is backed by an mmapped file.
Michael S. Tsirkin8561c922015-09-10 16:41:17 +030092 */
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +030093#define RAM_FILE (1 << 3)
pbrooke2eef172008-06-08 01:09:01 +000094#endif
bellard9fa3e852004-01-04 18:06:42 +000095
Andreas Färberbdc44642013-06-24 23:50:24 +020096struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
bellard6a00d602005-11-21 23:25:50 +000097/* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
Paolo Bonzinif240eb62015-08-26 00:17:58 +020099__thread CPUState *current_cpu;
pbrook2e70f6e2008-06-29 01:03:05 +0000100/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000101 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000102 2 = Adaptive rate instruction counting. */
Paolo Bonzini5708fc62012-11-26 15:36:40 +0100103int use_icount;
bellard6a00d602005-11-21 23:25:50 +0000104
pbrooke2eef172008-06-08 01:09:01 +0000105#if !defined(CONFIG_USER_ONLY)
Avi Kivity4346ae32012-02-10 17:00:01 +0200106
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200107typedef struct PhysPageEntry PhysPageEntry;
108
109struct PhysPageEntry {
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200110 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
Michael S. Tsirkin8b795762013-11-11 14:51:56 +0200111 uint32_t skip : 6;
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200112 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
Michael S. Tsirkin8b795762013-11-11 14:51:56 +0200113 uint32_t ptr : 26;
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200114};
115
Michael S. Tsirkin8b795762013-11-11 14:51:56 +0200116#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
117
Paolo Bonzini03f49952013-11-07 17:14:36 +0100118/* Size of the L2 (and L3, etc) page tables. */
Paolo Bonzini57271d62013-11-07 17:14:37 +0100119#define ADDR_SPACE_BITS 64
Paolo Bonzini03f49952013-11-07 17:14:36 +0100120
Michael S. Tsirkin026736c2013-11-13 20:13:03 +0200121#define P_L2_BITS 9
Paolo Bonzini03f49952013-11-07 17:14:36 +0100122#define P_L2_SIZE (1 << P_L2_BITS)
123
124#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
125
126typedef PhysPageEntry Node[P_L2_SIZE];
Paolo Bonzini0475d942013-05-29 12:28:21 +0200127
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200128typedef struct PhysPageMap {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100129 struct rcu_head rcu;
130
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200131 unsigned sections_nb;
132 unsigned sections_nb_alloc;
133 unsigned nodes_nb;
134 unsigned nodes_nb_alloc;
135 Node *nodes;
136 MemoryRegionSection *sections;
137} PhysPageMap;
138
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200139struct AddressSpaceDispatch {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100140 struct rcu_head rcu;
141
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200142 /* This is a multi-level map on the physical address space.
143 * The bottom level has pointers to MemoryRegionSections.
144 */
145 PhysPageEntry phys_map;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200146 PhysPageMap map;
Jan Kiszkaacc9d802013-05-26 21:55:37 +0200147 AddressSpace *as;
Paolo Bonzini1db8abb2013-05-21 12:07:21 +0200148};
149
Jan Kiszka90260c62013-05-26 21:46:51 +0200150#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
151typedef struct subpage_t {
152 MemoryRegion iomem;
Jan Kiszkaacc9d802013-05-26 21:55:37 +0200153 AddressSpace *as;
Jan Kiszka90260c62013-05-26 21:46:51 +0200154 hwaddr base;
155 uint16_t sub_section[TARGET_PAGE_SIZE];
156} subpage_t;
157
Liu Ping Fanb41aac42013-05-29 11:09:17 +0200158#define PHYS_SECTION_UNASSIGNED 0
159#define PHYS_SECTION_NOTDIRTY 1
160#define PHYS_SECTION_ROM 2
161#define PHYS_SECTION_WATCH 3
Avi Kivity5312bd82012-02-12 18:32:55 +0200162
pbrooke2eef172008-06-08 01:09:01 +0000163static void io_mem_init(void);
Avi Kivity62152b82011-07-26 14:26:14 +0300164static void memory_map_init(void);
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000165static void tcg_commit(MemoryListener *listener);
pbrooke2eef172008-06-08 01:09:01 +0000166
Avi Kivity1ec9b902012-01-02 12:47:48 +0200167static MemoryRegion io_mem_watch;
Peter Maydell32857f42015-10-01 15:29:50 +0100168
169/**
170 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
171 * @cpu: the CPU whose AddressSpace this is
172 * @as: the AddressSpace itself
173 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
174 * @tcg_as_listener: listener for tracking changes to the AddressSpace
175 */
176struct CPUAddressSpace {
177 CPUState *cpu;
178 AddressSpace *as;
179 struct AddressSpaceDispatch *memory_dispatch;
180 MemoryListener tcg_as_listener;
181};
182
pbrook6658ffb2007-03-16 23:58:11 +0000183#endif
bellard54936002003-05-13 00:25:15 +0000184
Paul Brook6d9a1302010-02-28 23:55:53 +0000185#if !defined(CONFIG_USER_ONLY)
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200186
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200187static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
Avi Kivityf7bf5462012-02-13 20:12:05 +0200188{
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200189 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
190 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
191 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
192 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
Avi Kivityf7bf5462012-02-13 20:12:05 +0200193 }
194}
195
Paolo Bonzinidb946042015-05-21 15:12:29 +0200196static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200197{
198 unsigned i;
Michael S. Tsirkin8b795762013-11-11 14:51:56 +0200199 uint32_t ret;
Paolo Bonzinidb946042015-05-21 15:12:29 +0200200 PhysPageEntry e;
201 PhysPageEntry *p;
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200202
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200203 ret = map->nodes_nb++;
Paolo Bonzinidb946042015-05-21 15:12:29 +0200204 p = map->nodes[ret];
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200205 assert(ret != PHYS_MAP_NODE_NIL);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200206 assert(ret != map->nodes_nb_alloc);
Paolo Bonzinidb946042015-05-21 15:12:29 +0200207
208 e.skip = leaf ? 0 : 1;
209 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
Paolo Bonzini03f49952013-11-07 17:14:36 +0100210 for (i = 0; i < P_L2_SIZE; ++i) {
Paolo Bonzinidb946042015-05-21 15:12:29 +0200211 memcpy(&p[i], &e, sizeof(e));
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200212 }
Avi Kivityf7bf5462012-02-13 20:12:05 +0200213 return ret;
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200214}
215
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200216static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
217 hwaddr *index, hwaddr *nb, uint16_t leaf,
Avi Kivity29990972012-02-13 20:21:20 +0200218 int level)
Avi Kivityf7bf5462012-02-13 20:12:05 +0200219{
220 PhysPageEntry *p;
Paolo Bonzini03f49952013-11-07 17:14:36 +0100221 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
Avi Kivityf7bf5462012-02-13 20:12:05 +0200222
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200223 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
Paolo Bonzinidb946042015-05-21 15:12:29 +0200224 lp->ptr = phys_map_node_alloc(map, level == 0);
Avi Kivityf7bf5462012-02-13 20:12:05 +0200225 }
Paolo Bonzinidb946042015-05-21 15:12:29 +0200226 p = map->nodes[lp->ptr];
Paolo Bonzini03f49952013-11-07 17:14:36 +0100227 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
Avi Kivityf7bf5462012-02-13 20:12:05 +0200228
Paolo Bonzini03f49952013-11-07 17:14:36 +0100229 while (*nb && lp < &p[P_L2_SIZE]) {
Avi Kivity07f07b32012-02-13 20:45:32 +0200230 if ((*index & (step - 1)) == 0 && *nb >= step) {
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200231 lp->skip = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +0200232 lp->ptr = leaf;
Avi Kivity07f07b32012-02-13 20:45:32 +0200233 *index += step;
234 *nb -= step;
Avi Kivity29990972012-02-13 20:21:20 +0200235 } else {
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200236 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
Avi Kivity29990972012-02-13 20:21:20 +0200237 }
238 ++lp;
Avi Kivityf7bf5462012-02-13 20:12:05 +0200239 }
240}
241
Avi Kivityac1970f2012-10-03 16:22:53 +0200242static void phys_page_set(AddressSpaceDispatch *d,
Avi Kivitya8170e52012-10-23 12:30:10 +0200243 hwaddr index, hwaddr nb,
Avi Kivity29990972012-02-13 20:21:20 +0200244 uint16_t leaf)
bellard92e873b2004-05-21 14:52:29 +0000245{
Avi Kivity29990972012-02-13 20:21:20 +0200246 /* Wildly overreserve - it doesn't matter much. */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200247 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
bellard92e873b2004-05-21 14:52:29 +0000248
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200249 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
bellard92e873b2004-05-21 14:52:29 +0000250}
251
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200252/* Compact a non leaf page entry. Simply detect that the entry has a single child,
253 * and update our entry so we can skip it and go directly to the destination.
254 */
255static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
256{
257 unsigned valid_ptr = P_L2_SIZE;
258 int valid = 0;
259 PhysPageEntry *p;
260 int i;
261
262 if (lp->ptr == PHYS_MAP_NODE_NIL) {
263 return;
264 }
265
266 p = nodes[lp->ptr];
267 for (i = 0; i < P_L2_SIZE; i++) {
268 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
269 continue;
270 }
271
272 valid_ptr = i;
273 valid++;
274 if (p[i].skip) {
275 phys_page_compact(&p[i], nodes, compacted);
276 }
277 }
278
279 /* We can only compress if there's only one child. */
280 if (valid != 1) {
281 return;
282 }
283
284 assert(valid_ptr < P_L2_SIZE);
285
286 /* Don't compress if it won't fit in the # of bits we have. */
287 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
288 return;
289 }
290
291 lp->ptr = p[valid_ptr].ptr;
292 if (!p[valid_ptr].skip) {
293 /* If our only child is a leaf, make this a leaf. */
294 /* By design, we should have made this node a leaf to begin with so we
295 * should never reach here.
296 * But since it's so simple to handle this, let's do it just in case we
297 * change this rule.
298 */
299 lp->skip = 0;
300 } else {
301 lp->skip += p[valid_ptr].skip;
302 }
303}
304
305static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
306{
307 DECLARE_BITMAP(compacted, nodes_nb);
308
309 if (d->phys_map.skip) {
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200310 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200311 }
312}
313
Michael S. Tsirkin97115a82013-11-13 20:08:19 +0200314static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
Paolo Bonzini9affd6f2013-05-29 12:09:47 +0200315 Node *nodes, MemoryRegionSection *sections)
bellard92e873b2004-05-21 14:52:29 +0000316{
Avi Kivity31ab2b42012-02-13 16:44:19 +0200317 PhysPageEntry *p;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +0200318 hwaddr index = addr >> TARGET_PAGE_BITS;
Avi Kivity31ab2b42012-02-13 16:44:19 +0200319 int i;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +0200320
Michael S. Tsirkin9736e552013-11-11 14:42:43 +0200321 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
Avi Kivityc19e8802012-02-13 20:25:31 +0200322 if (lp.ptr == PHYS_MAP_NODE_NIL) {
Paolo Bonzini9affd6f2013-05-29 12:09:47 +0200323 return &sections[PHYS_SECTION_UNASSIGNED];
Avi Kivity31ab2b42012-02-13 16:44:19 +0200324 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +0200325 p = nodes[lp.ptr];
Paolo Bonzini03f49952013-11-07 17:14:36 +0100326 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
Avi Kivityf1f6e3b2011-11-20 17:52:22 +0200327 }
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200328
329 if (sections[lp.ptr].size.hi ||
330 range_covers_byte(sections[lp.ptr].offset_within_address_space,
331 sections[lp.ptr].size.lo, addr)) {
332 return &sections[lp.ptr];
333 } else {
334 return &sections[PHYS_SECTION_UNASSIGNED];
335 }
Avi Kivityf3705d52012-03-08 16:16:34 +0200336}
337
Blue Swirle5548612012-04-21 13:08:33 +0000338bool memory_region_is_unassigned(MemoryRegion *mr)
339{
Paolo Bonzini2a8e7492013-05-24 14:34:08 +0200340 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
Blue Swirle5548612012-04-21 13:08:33 +0000341 && mr != &io_mem_watch;
342}
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200343
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100344/* Called from RCU critical section */
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200345static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
Jan Kiszka90260c62013-05-26 21:46:51 +0200346 hwaddr addr,
347 bool resolve_subpage)
Jan Kiszka9f029602013-05-06 16:48:02 +0200348{
Jan Kiszka90260c62013-05-26 21:46:51 +0200349 MemoryRegionSection *section;
350 subpage_t *subpage;
351
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200352 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
Jan Kiszka90260c62013-05-26 21:46:51 +0200353 if (resolve_subpage && section->mr->subpage) {
354 subpage = container_of(section->mr, subpage_t, iomem);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200355 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
Jan Kiszka90260c62013-05-26 21:46:51 +0200356 }
357 return section;
Jan Kiszka9f029602013-05-06 16:48:02 +0200358}
359
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100360/* Called from RCU critical section */
Jan Kiszka90260c62013-05-26 21:46:51 +0200361static MemoryRegionSection *
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200362address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
Jan Kiszka90260c62013-05-26 21:46:51 +0200363 hwaddr *plen, bool resolve_subpage)
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200364{
365 MemoryRegionSection *section;
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200366 MemoryRegion *mr;
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100367 Int128 diff;
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200368
Paolo Bonzinic7086b42013-06-02 15:27:39 +0200369 section = address_space_lookup_region(d, addr, resolve_subpage);
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200370 /* Compute offset within MemoryRegionSection */
371 addr -= section->offset_within_address_space;
372
373 /* Compute offset within MemoryRegion */
374 *xlat = addr + section->offset_within_region;
375
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200376 mr = section->mr;
Paolo Bonzinib242e0e2015-07-04 00:24:51 +0200377
378 /* MMIO registers can be expected to perform full-width accesses based only
379 * on their address, without considering adjacent registers that could
380 * decode to completely different MemoryRegions. When such registers
381 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
382 * regions overlap wildly. For this reason we cannot clamp the accesses
383 * here.
384 *
385 * If the length is small (as is the case for address_space_ldl/stl),
386 * everything works fine. If the incoming length is large, however,
387 * the caller really has to do the clamping through memory_access_size.
388 */
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200389 if (memory_region_is_ram(mr)) {
Paolo Bonzinie4a511f2015-06-17 10:36:54 +0200390 diff = int128_sub(section->size, int128_make64(addr));
Paolo Bonzini965eb2f2015-06-17 10:40:27 +0200391 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
392 }
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200393 return section;
394}
Jan Kiszka90260c62013-05-26 21:46:51 +0200395
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100396static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
397{
398 if (memory_region_is_ram(mr)) {
399 return !(is_write && mr->readonly);
400 }
401 if (memory_region_is_romd(mr)) {
402 return !is_write;
403 }
404
405 return false;
406}
407
Paolo Bonzini41063e12015-03-18 14:21:43 +0100408/* Called from RCU critical section */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +0200409MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
410 hwaddr *xlat, hwaddr *plen,
411 bool is_write)
Jan Kiszka90260c62013-05-26 21:46:51 +0200412{
Avi Kivity30951152012-10-30 13:47:46 +0200413 IOMMUTLBEntry iotlb;
414 MemoryRegionSection *section;
415 MemoryRegion *mr;
Avi Kivity30951152012-10-30 13:47:46 +0200416
417 for (;;) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100418 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
419 section = address_space_translate_internal(d, addr, &addr, plen, true);
Avi Kivity30951152012-10-30 13:47:46 +0200420 mr = section->mr;
421
422 if (!mr->iommu_ops) {
423 break;
424 }
425
Le Tan8d7b8cb2014-08-16 13:55:37 +0800426 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
Avi Kivity30951152012-10-30 13:47:46 +0200427 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
428 | (addr & iotlb.addr_mask));
Peter Crosthwaite23820db2015-03-16 22:35:54 -0700429 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
Avi Kivity30951152012-10-30 13:47:46 +0200430 if (!(iotlb.perm & (1 << is_write))) {
431 mr = &io_mem_unassigned;
432 break;
433 }
434
435 as = iotlb.target_as;
436 }
437
Alexey Kardashevskiyfe680d02014-05-07 13:40:39 +0000438 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100439 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
Peter Crosthwaite23820db2015-03-16 22:35:54 -0700440 *plen = MIN(page, *plen);
Paolo Bonzinia87f3952014-02-07 15:47:46 +0100441 }
442
Avi Kivity30951152012-10-30 13:47:46 +0200443 *xlat = addr;
444 return mr;
Jan Kiszka90260c62013-05-26 21:46:51 +0200445}
446
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100447/* Called from RCU critical section */
Jan Kiszka90260c62013-05-26 21:46:51 +0200448MemoryRegionSection *
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +0200449address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
450 hwaddr *xlat, hwaddr *plen)
Jan Kiszka90260c62013-05-26 21:46:51 +0200451{
Avi Kivity30951152012-10-30 13:47:46 +0200452 MemoryRegionSection *section;
Peter Maydell32857f42015-10-01 15:29:50 +0100453 section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +0200454 addr, xlat, plen, false);
Avi Kivity30951152012-10-30 13:47:46 +0200455
456 assert(!section->mr->iommu_ops);
457 return section;
Jan Kiszka90260c62013-05-26 21:46:51 +0200458}
bellard9fa3e852004-01-04 18:06:42 +0000459#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000460
Andreas Färberb170fce2013-01-20 20:23:22 +0100461#if !defined(CONFIG_USER_ONLY)
pbrook9656f322008-07-01 20:01:19 +0000462
Juan Quintelae59fb372009-09-29 22:48:21 +0200463static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200464{
Andreas Färber259186a2013-01-17 18:51:17 +0100465 CPUState *cpu = opaque;
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200466
aurel323098dba2009-03-07 21:28:24 +0000467 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
468 version_id is increased. */
Andreas Färber259186a2013-01-17 18:51:17 +0100469 cpu->interrupt_request &= ~0x01;
Christian Borntraegerc01a71c2014-03-17 17:13:12 +0100470 tlb_flush(cpu, 1);
pbrook9656f322008-07-01 20:01:19 +0000471
472 return 0;
473}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200474
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400475static int cpu_common_pre_load(void *opaque)
476{
477 CPUState *cpu = opaque;
478
Paolo Bonziniadee6422014-12-19 12:53:14 +0100479 cpu->exception_index = -1;
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400480
481 return 0;
482}
483
484static bool cpu_common_exception_index_needed(void *opaque)
485{
486 CPUState *cpu = opaque;
487
Paolo Bonziniadee6422014-12-19 12:53:14 +0100488 return tcg_enabled() && cpu->exception_index != -1;
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400489}
490
491static const VMStateDescription vmstate_cpu_common_exception_index = {
492 .name = "cpu_common/exception_index",
493 .version_id = 1,
494 .minimum_version_id = 1,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200495 .needed = cpu_common_exception_index_needed,
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400496 .fields = (VMStateField[]) {
497 VMSTATE_INT32(exception_index, CPUState),
498 VMSTATE_END_OF_LIST()
499 }
500};
501
Andrey Smetaninbac05aa2015-07-03 15:01:44 +0300502static bool cpu_common_crash_occurred_needed(void *opaque)
503{
504 CPUState *cpu = opaque;
505
506 return cpu->crash_occurred;
507}
508
509static const VMStateDescription vmstate_cpu_common_crash_occurred = {
510 .name = "cpu_common/crash_occurred",
511 .version_id = 1,
512 .minimum_version_id = 1,
513 .needed = cpu_common_crash_occurred_needed,
514 .fields = (VMStateField[]) {
515 VMSTATE_BOOL(crash_occurred, CPUState),
516 VMSTATE_END_OF_LIST()
517 }
518};
519
Andreas Färber1a1562f2013-06-17 04:09:11 +0200520const VMStateDescription vmstate_cpu_common = {
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200521 .name = "cpu_common",
522 .version_id = 1,
523 .minimum_version_id = 1,
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400524 .pre_load = cpu_common_pre_load,
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200525 .post_load = cpu_common_post_load,
Juan Quintela35d08452014-04-16 16:01:33 +0200526 .fields = (VMStateField[]) {
Andreas Färber259186a2013-01-17 18:51:17 +0100527 VMSTATE_UINT32(halted, CPUState),
528 VMSTATE_UINT32(interrupt_request, CPUState),
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200529 VMSTATE_END_OF_LIST()
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400530 },
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200531 .subsections = (const VMStateDescription*[]) {
532 &vmstate_cpu_common_exception_index,
Andrey Smetaninbac05aa2015-07-03 15:01:44 +0300533 &vmstate_cpu_common_crash_occurred,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200534 NULL
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200535 }
536};
Andreas Färber1a1562f2013-06-17 04:09:11 +0200537
pbrook9656f322008-07-01 20:01:19 +0000538#endif
539
Andreas Färber38d8f5c2012-12-17 19:47:15 +0100540CPUState *qemu_get_cpu(int index)
Glauber Costa950f1472009-06-09 12:15:18 -0400541{
Andreas Färberbdc44642013-06-24 23:50:24 +0200542 CPUState *cpu;
Glauber Costa950f1472009-06-09 12:15:18 -0400543
Andreas Färberbdc44642013-06-24 23:50:24 +0200544 CPU_FOREACH(cpu) {
Andreas Färber55e5c282012-12-17 06:18:02 +0100545 if (cpu->cpu_index == index) {
Andreas Färberbdc44642013-06-24 23:50:24 +0200546 return cpu;
Andreas Färber55e5c282012-12-17 06:18:02 +0100547 }
Glauber Costa950f1472009-06-09 12:15:18 -0400548 }
549
Andreas Färberbdc44642013-06-24 23:50:24 +0200550 return NULL;
Glauber Costa950f1472009-06-09 12:15:18 -0400551}
552
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000553#if !defined(CONFIG_USER_ONLY)
554void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
555{
556 /* We only support one address space per cpu at the moment. */
557 assert(cpu->as == as);
558
Peter Maydell32857f42015-10-01 15:29:50 +0100559 if (cpu->cpu_ases) {
560 /* We've already registered the listener for our only AS */
561 return;
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000562 }
Peter Maydell32857f42015-10-01 15:29:50 +0100563
564 cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
565 cpu->cpu_ases[0].cpu = cpu;
566 cpu->cpu_ases[0].as = as;
567 cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
568 memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000569}
570#endif
571
Bharata B Raob7bca732015-06-23 19:31:13 -0700572#ifndef CONFIG_USER_ONLY
573static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
574
575static int cpu_get_free_index(Error **errp)
576{
577 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
578
579 if (cpu >= MAX_CPUMASK_BITS) {
580 error_setg(errp, "Trying to use more CPUs than max of %d",
581 MAX_CPUMASK_BITS);
582 return -1;
583 }
584
585 bitmap_set(cpu_index_map, cpu, 1);
586 return cpu;
587}
588
589void cpu_exec_exit(CPUState *cpu)
590{
591 if (cpu->cpu_index == -1) {
592 /* cpu_index was never allocated by this @cpu or was already freed. */
593 return;
594 }
595
596 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
597 cpu->cpu_index = -1;
598}
599#else
600
601static int cpu_get_free_index(Error **errp)
602{
603 CPUState *some_cpu;
604 int cpu_index = 0;
605
606 CPU_FOREACH(some_cpu) {
607 cpu_index++;
608 }
609 return cpu_index;
610}
611
612void cpu_exec_exit(CPUState *cpu)
613{
614}
615#endif
616
Peter Crosthwaite4bad9e32015-06-23 19:31:18 -0700617void cpu_exec_init(CPUState *cpu, Error **errp)
bellardfd6ce8f2003-05-14 19:00:11 +0000618{
Andreas Färberb170fce2013-01-20 20:23:22 +0100619 CPUClass *cc = CPU_GET_CLASS(cpu);
bellard6a00d602005-11-21 23:25:50 +0000620 int cpu_index;
Bharata B Raob7bca732015-06-23 19:31:13 -0700621 Error *local_err = NULL;
bellard6a00d602005-11-21 23:25:50 +0000622
Eduardo Habkost291135b2015-04-27 17:00:33 -0300623#ifndef CONFIG_USER_ONLY
624 cpu->as = &address_space_memory;
625 cpu->thread_id = qemu_get_thread_id();
Eduardo Habkost291135b2015-04-27 17:00:33 -0300626#endif
627
pbrookc2764712009-03-07 15:24:59 +0000628#if defined(CONFIG_USER_ONLY)
629 cpu_list_lock();
630#endif
Bharata B Raob7bca732015-06-23 19:31:13 -0700631 cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
632 if (local_err) {
633 error_propagate(errp, local_err);
634#if defined(CONFIG_USER_ONLY)
635 cpu_list_unlock();
636#endif
637 return;
bellard6a00d602005-11-21 23:25:50 +0000638 }
Andreas Färberbdc44642013-06-24 23:50:24 +0200639 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
pbrookc2764712009-03-07 15:24:59 +0000640#if defined(CONFIG_USER_ONLY)
641 cpu_list_unlock();
642#endif
Andreas Färbere0d47942013-07-29 04:07:50 +0200643 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
644 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
645 }
pbrookb3c77242008-06-30 16:31:04 +0000646#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
Alex Williamson0be71e32010-06-25 11:09:07 -0600647 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
Peter Crosthwaite4bad9e32015-06-23 19:31:18 -0700648 cpu_save, cpu_load, cpu->env_ptr);
Andreas Färberb170fce2013-01-20 20:23:22 +0100649 assert(cc->vmsd == NULL);
Andreas Färbere0d47942013-07-29 04:07:50 +0200650 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
pbrookb3c77242008-06-30 16:31:04 +0000651#endif
Andreas Färberb170fce2013-01-20 20:23:22 +0100652 if (cc->vmsd != NULL) {
653 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
654 }
bellardfd6ce8f2003-05-14 19:00:11 +0000655}
656
Paul Brook94df27f2010-02-28 23:47:45 +0000657#if defined(CONFIG_USER_ONLY)
Andreas Färber00b941e2013-06-29 18:55:54 +0200658static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +0000659{
660 tb_invalidate_phys_page_range(pc, pc + 1, 0);
661}
662#else
Andreas Färber00b941e2013-06-29 18:55:54 +0200663static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
Max Filippov1e7855a2012-04-10 02:48:17 +0400664{
Max Filippove8262a12013-09-27 22:29:17 +0400665 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
666 if (phys != -1) {
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000667 tb_invalidate_phys_addr(cpu->as,
Edgar E. Iglesias29d8ec72013-11-07 19:43:10 +0100668 phys | (pc & ~TARGET_PAGE_MASK));
Max Filippove8262a12013-09-27 22:29:17 +0400669 }
Max Filippov1e7855a2012-04-10 02:48:17 +0400670}
bellardc27004e2005-01-03 23:35:10 +0000671#endif
bellardd720b932004-04-25 17:57:43 +0000672
Paul Brookc527ee82010-03-01 03:31:14 +0000673#if defined(CONFIG_USER_ONLY)
Andreas Färber75a34032013-09-02 16:57:02 +0200674void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +0000675
676{
677}
678
Peter Maydell3ee887e2014-09-12 14:06:48 +0100679int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
680 int flags)
681{
682 return -ENOSYS;
683}
684
685void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
686{
687}
688
Andreas Färber75a34032013-09-02 16:57:02 +0200689int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
Paul Brookc527ee82010-03-01 03:31:14 +0000690 int flags, CPUWatchpoint **watchpoint)
691{
692 return -ENOSYS;
693}
694#else
pbrook6658ffb2007-03-16 23:58:11 +0000695/* Add a watchpoint. */
Andreas Färber75a34032013-09-02 16:57:02 +0200696int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
aliguoria1d1bb32008-11-18 20:07:32 +0000697 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +0000698{
aliguoric0ce9982008-11-25 22:13:57 +0000699 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +0000700
Peter Maydell05068c02014-09-12 14:06:48 +0100701 /* forbid ranges which are empty or run off the end of the address space */
Max Filippov07e28632014-09-17 22:03:36 -0700702 if (len == 0 || (addr + len - 1) < addr) {
Andreas Färber75a34032013-09-02 16:57:02 +0200703 error_report("tried to set invalid watchpoint at %"
704 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
aliguorib4051332008-11-18 20:14:20 +0000705 return -EINVAL;
706 }
Anthony Liguori7267c092011-08-20 22:09:37 -0500707 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +0000708
aliguoria1d1bb32008-11-18 20:07:32 +0000709 wp->vaddr = addr;
Peter Maydell05068c02014-09-12 14:06:48 +0100710 wp->len = len;
aliguoria1d1bb32008-11-18 20:07:32 +0000711 wp->flags = flags;
712
aliguori2dc9f412008-11-18 20:56:59 +0000713 /* keep all GDB-injected watchpoints in front */
Andreas Färberff4700b2013-08-26 18:23:18 +0200714 if (flags & BP_GDB) {
715 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
716 } else {
717 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
718 }
aliguoria1d1bb32008-11-18 20:07:32 +0000719
Andreas Färber31b030d2013-09-04 01:29:02 +0200720 tlb_flush_page(cpu, addr);
aliguoria1d1bb32008-11-18 20:07:32 +0000721
722 if (watchpoint)
723 *watchpoint = wp;
724 return 0;
pbrook6658ffb2007-03-16 23:58:11 +0000725}
726
aliguoria1d1bb32008-11-18 20:07:32 +0000727/* Remove a specific watchpoint. */
Andreas Färber75a34032013-09-02 16:57:02 +0200728int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
aliguoria1d1bb32008-11-18 20:07:32 +0000729 int flags)
pbrook6658ffb2007-03-16 23:58:11 +0000730{
aliguoria1d1bb32008-11-18 20:07:32 +0000731 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +0000732
Andreas Färberff4700b2013-08-26 18:23:18 +0200733 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +0100734 if (addr == wp->vaddr && len == wp->len
aliguori6e140f22008-11-18 20:37:55 +0000735 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
Andreas Färber75a34032013-09-02 16:57:02 +0200736 cpu_watchpoint_remove_by_ref(cpu, wp);
pbrook6658ffb2007-03-16 23:58:11 +0000737 return 0;
738 }
739 }
aliguoria1d1bb32008-11-18 20:07:32 +0000740 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +0000741}
742
aliguoria1d1bb32008-11-18 20:07:32 +0000743/* Remove a specific watchpoint by reference. */
Andreas Färber75a34032013-09-02 16:57:02 +0200744void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +0000745{
Andreas Färberff4700b2013-08-26 18:23:18 +0200746 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +0000747
Andreas Färber31b030d2013-09-04 01:29:02 +0200748 tlb_flush_page(cpu, watchpoint->vaddr);
aliguoria1d1bb32008-11-18 20:07:32 +0000749
Anthony Liguori7267c092011-08-20 22:09:37 -0500750 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +0000751}
752
aliguoria1d1bb32008-11-18 20:07:32 +0000753/* Remove all matching watchpoints. */
Andreas Färber75a34032013-09-02 16:57:02 +0200754void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000755{
aliguoric0ce9982008-11-25 22:13:57 +0000756 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000757
Andreas Färberff4700b2013-08-26 18:23:18 +0200758 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
Andreas Färber75a34032013-09-02 16:57:02 +0200759 if (wp->flags & mask) {
760 cpu_watchpoint_remove_by_ref(cpu, wp);
761 }
aliguoric0ce9982008-11-25 22:13:57 +0000762 }
aliguoria1d1bb32008-11-18 20:07:32 +0000763}
Peter Maydell05068c02014-09-12 14:06:48 +0100764
765/* Return true if this watchpoint address matches the specified
766 * access (ie the address range covered by the watchpoint overlaps
767 * partially or completely with the address range covered by the
768 * access).
769 */
770static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
771 vaddr addr,
772 vaddr len)
773{
774 /* We know the lengths are non-zero, but a little caution is
775 * required to avoid errors in the case where the range ends
776 * exactly at the top of the address space and so addr + len
777 * wraps round to zero.
778 */
779 vaddr wpend = wp->vaddr + wp->len - 1;
780 vaddr addrend = addr + len - 1;
781
782 return !(addr > wpend || wp->vaddr > addrend);
783}
784
Paul Brookc527ee82010-03-01 03:31:14 +0000785#endif
aliguoria1d1bb32008-11-18 20:07:32 +0000786
787/* Add a breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200788int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +0000789 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000790{
aliguoric0ce9982008-11-25 22:13:57 +0000791 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +0000792
Anthony Liguori7267c092011-08-20 22:09:37 -0500793 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +0000794
795 bp->pc = pc;
796 bp->flags = flags;
797
aliguori2dc9f412008-11-18 20:56:59 +0000798 /* keep all GDB-injected breakpoints in front */
Andreas Färber00b941e2013-06-29 18:55:54 +0200799 if (flags & BP_GDB) {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200800 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200801 } else {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200802 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200803 }
aliguoria1d1bb32008-11-18 20:07:32 +0000804
Andreas Färberf0c3c502013-08-26 21:22:53 +0200805 breakpoint_invalidate(cpu, pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000806
Andreas Färber00b941e2013-06-29 18:55:54 +0200807 if (breakpoint) {
aliguoria1d1bb32008-11-18 20:07:32 +0000808 *breakpoint = bp;
Andreas Färber00b941e2013-06-29 18:55:54 +0200809 }
aliguoria1d1bb32008-11-18 20:07:32 +0000810 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000811}
812
813/* Remove a specific breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200814int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +0000815{
aliguoria1d1bb32008-11-18 20:07:32 +0000816 CPUBreakpoint *bp;
817
Andreas Färberf0c3c502013-08-26 21:22:53 +0200818 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +0000819 if (bp->pc == pc && bp->flags == flags) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200820 cpu_breakpoint_remove_by_ref(cpu, bp);
bellard4c3a88a2003-07-26 12:06:08 +0000821 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000822 }
bellard4c3a88a2003-07-26 12:06:08 +0000823 }
aliguoria1d1bb32008-11-18 20:07:32 +0000824 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +0000825}
826
aliguoria1d1bb32008-11-18 20:07:32 +0000827/* Remove a specific breakpoint by reference. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200828void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000829{
Andreas Färberf0c3c502013-08-26 21:22:53 +0200830 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
831
832 breakpoint_invalidate(cpu, breakpoint->pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000833
Anthony Liguori7267c092011-08-20 22:09:37 -0500834 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +0000835}
836
837/* Remove all matching breakpoints. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200838void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000839{
aliguoric0ce9982008-11-25 22:13:57 +0000840 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000841
Andreas Färberf0c3c502013-08-26 21:22:53 +0200842 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200843 if (bp->flags & mask) {
844 cpu_breakpoint_remove_by_ref(cpu, bp);
845 }
aliguoric0ce9982008-11-25 22:13:57 +0000846 }
bellard4c3a88a2003-07-26 12:06:08 +0000847}
848
bellardc33a3462003-07-29 20:50:33 +0000849/* enable or disable single step mode. EXCP_DEBUG is returned by the
850 CPU loop after each instruction */
Andreas Färber3825b282013-06-24 18:41:06 +0200851void cpu_single_step(CPUState *cpu, int enabled)
bellardc33a3462003-07-29 20:50:33 +0000852{
Andreas Färbered2803d2013-06-21 20:20:45 +0200853 if (cpu->singlestep_enabled != enabled) {
854 cpu->singlestep_enabled = enabled;
855 if (kvm_enabled()) {
Stefan Weil38e478e2013-07-25 20:50:21 +0200856 kvm_update_guest_debug(cpu, 0);
Andreas Färbered2803d2013-06-21 20:20:45 +0200857 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100858 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +0000859 /* XXX: only flush what is necessary */
Peter Crosthwaitebbd77c12015-06-23 19:31:15 -0700860 tb_flush(cpu);
aliguorie22a25c2009-03-12 20:12:48 +0000861 }
bellardc33a3462003-07-29 20:50:33 +0000862 }
bellardc33a3462003-07-29 20:50:33 +0000863}
864
Andreas Färbera47dddd2013-09-03 17:38:47 +0200865void cpu_abort(CPUState *cpu, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +0000866{
867 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +0000868 va_list ap2;
bellard75012672003-06-21 13:11:07 +0000869
870 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +0000871 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +0000872 fprintf(stderr, "qemu: fatal: ");
873 vfprintf(stderr, fmt, ap);
874 fprintf(stderr, "\n");
Andreas Färber878096e2013-05-27 01:33:50 +0200875 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +0000876 if (qemu_log_enabled()) {
877 qemu_log("qemu: fatal: ");
878 qemu_log_vprintf(fmt, ap2);
879 qemu_log("\n");
Andreas Färbera0762852013-06-16 07:28:50 +0200880 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +0000881 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +0000882 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +0000883 }
pbrook493ae1f2007-11-23 16:53:59 +0000884 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +0000885 va_end(ap);
Pavel Dovgalyuk76159362015-09-17 19:25:07 +0300886 replay_finish();
Riku Voipiofd052bf2010-01-25 14:30:49 +0200887#if defined(CONFIG_USER_ONLY)
888 {
889 struct sigaction act;
890 sigfillset(&act.sa_mask);
891 act.sa_handler = SIG_DFL;
892 sigaction(SIGABRT, &act, NULL);
893 }
894#endif
bellard75012672003-06-21 13:11:07 +0000895 abort();
896}
897
bellard01243112004-01-04 15:48:17 +0000898#if !defined(CONFIG_USER_ONLY)
Mike Day0dc3f442013-09-05 14:41:35 -0400899/* Called from RCU critical section */
Paolo Bonzini041603f2013-09-09 17:49:45 +0200900static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
901{
902 RAMBlock *block;
903
Paolo Bonzini43771532013-09-09 17:58:40 +0200904 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +0200905 if (block && addr - block->offset < block->max_length) {
Paolo Bonzini68851b92015-10-22 13:51:30 +0200906 return block;
Paolo Bonzini041603f2013-09-09 17:49:45 +0200907 }
Mike Day0dc3f442013-09-05 14:41:35 -0400908 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +0200909 if (addr - block->offset < block->max_length) {
Paolo Bonzini041603f2013-09-09 17:49:45 +0200910 goto found;
911 }
912 }
913
914 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
915 abort();
916
917found:
Paolo Bonzini43771532013-09-09 17:58:40 +0200918 /* It is safe to write mru_block outside the iothread lock. This
919 * is what happens:
920 *
921 * mru_block = xxx
922 * rcu_read_unlock()
923 * xxx removed from list
924 * rcu_read_lock()
925 * read mru_block
926 * mru_block = NULL;
927 * call_rcu(reclaim_ramblock, xxx);
928 * rcu_read_unlock()
929 *
930 * atomic_rcu_set is not needed here. The block was already published
931 * when it was placed into the list. Here we're just making an extra
932 * copy of the pointer.
933 */
Paolo Bonzini041603f2013-09-09 17:49:45 +0200934 ram_list.mru_block = block;
935 return block;
936}
937
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200938static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
bellard1ccde1c2004-02-06 19:46:14 +0000939{
Peter Crosthwaite9a135652015-09-10 22:39:41 -0700940 CPUState *cpu;
Paolo Bonzini041603f2013-09-09 17:49:45 +0200941 ram_addr_t start1;
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200942 RAMBlock *block;
943 ram_addr_t end;
944
945 end = TARGET_PAGE_ALIGN(start + length);
946 start &= TARGET_PAGE_MASK;
bellardf23db162005-08-21 19:12:28 +0000947
Mike Day0dc3f442013-09-05 14:41:35 -0400948 rcu_read_lock();
Paolo Bonzini041603f2013-09-09 17:49:45 +0200949 block = qemu_get_ram_block(start);
950 assert(block == qemu_get_ram_block(end - 1));
Michael S. Tsirkin1240be22014-11-12 11:44:41 +0200951 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
Peter Crosthwaite9a135652015-09-10 22:39:41 -0700952 CPU_FOREACH(cpu) {
953 tlb_reset_dirty(cpu, start1, length);
954 }
Mike Day0dc3f442013-09-05 14:41:35 -0400955 rcu_read_unlock();
Juan Quintelad24981d2012-05-22 00:42:40 +0200956}
957
958/* Note: start and end must be within the same ram block. */
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000959bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
960 ram_addr_t length,
961 unsigned client)
Juan Quintelad24981d2012-05-22 00:42:40 +0200962{
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000963 unsigned long end, page;
964 bool dirty;
Juan Quintelad24981d2012-05-22 00:42:40 +0200965
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000966 if (length == 0) {
967 return false;
968 }
969
970 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
971 page = start >> TARGET_PAGE_BITS;
972 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
973 page, end - page);
974
975 if (dirty && tcg_enabled()) {
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200976 tlb_reset_dirty_range_all(start, length);
Juan Quintelad24981d2012-05-22 00:42:40 +0200977 }
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000978
979 return dirty;
bellard1ccde1c2004-02-06 19:46:14 +0000980}
981
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100982/* Called from RCU critical section */
Andreas Färberbb0e6272013-09-03 13:32:01 +0200983hwaddr memory_region_section_get_iotlb(CPUState *cpu,
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200984 MemoryRegionSection *section,
985 target_ulong vaddr,
986 hwaddr paddr, hwaddr xlat,
987 int prot,
988 target_ulong *address)
Blue Swirle5548612012-04-21 13:08:33 +0000989{
Avi Kivitya8170e52012-10-23 12:30:10 +0200990 hwaddr iotlb;
Blue Swirle5548612012-04-21 13:08:33 +0000991 CPUWatchpoint *wp;
992
Blue Swirlcc5bea62012-04-14 14:56:48 +0000993 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +0000994 /* Normal RAM. */
995 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200996 + xlat;
Blue Swirle5548612012-04-21 13:08:33 +0000997 if (!section->readonly) {
Liu Ping Fanb41aac42013-05-29 11:09:17 +0200998 iotlb |= PHYS_SECTION_NOTDIRTY;
Blue Swirle5548612012-04-21 13:08:33 +0000999 } else {
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001000 iotlb |= PHYS_SECTION_ROM;
Blue Swirle5548612012-04-21 13:08:33 +00001001 }
1002 } else {
Peter Maydell0b8e2c12015-07-20 12:27:16 +01001003 AddressSpaceDispatch *d;
1004
1005 d = atomic_rcu_read(&section->address_space->dispatch);
1006 iotlb = section - d->map.sections;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001007 iotlb += xlat;
Blue Swirle5548612012-04-21 13:08:33 +00001008 }
1009
1010 /* Make accesses to pages with watchpoints go via the
1011 watchpoint trap routines. */
Andreas Färberff4700b2013-08-26 18:23:18 +02001012 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001013 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
Blue Swirle5548612012-04-21 13:08:33 +00001014 /* Avoid trapping reads of pages with a write breakpoint. */
1015 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001016 iotlb = PHYS_SECTION_WATCH + paddr;
Blue Swirle5548612012-04-21 13:08:33 +00001017 *address |= TLB_MMIO;
1018 break;
1019 }
1020 }
1021 }
1022
1023 return iotlb;
1024}
bellard9fa3e852004-01-04 18:06:42 +00001025#endif /* defined(CONFIG_USER_ONLY) */
1026
pbrooke2eef172008-06-08 01:09:01 +00001027#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00001028
Anthony Liguoric227f092009-10-01 16:12:16 -05001029static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001030 uint16_t section);
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001031static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
Avi Kivity54688b12012-02-09 17:34:32 +02001032
Igor Mammedova2b257d2014-10-31 16:38:37 +00001033static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1034 qemu_anon_ram_alloc;
Markus Armbruster91138032013-07-31 15:11:08 +02001035
1036/*
1037 * Set a custom physical guest memory alloator.
1038 * Accelerators with unusual needs may need this. Hopefully, we can
1039 * get rid of it eventually.
1040 */
Igor Mammedova2b257d2014-10-31 16:38:37 +00001041void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +02001042{
1043 phys_mem_alloc = alloc;
1044}
1045
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001046static uint16_t phys_section_add(PhysPageMap *map,
1047 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001048{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001049 /* The physical section number is ORed with a page-aligned
1050 * pointer to produce the iotlb entries. Thus it should
1051 * never overflow into the page-aligned value.
1052 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001053 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001054
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001055 if (map->sections_nb == map->sections_nb_alloc) {
1056 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1057 map->sections = g_renew(MemoryRegionSection, map->sections,
1058 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001059 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001060 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001061 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001062 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001063}
1064
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001065static void phys_section_destroy(MemoryRegion *mr)
1066{
Don Slutz55b4e802015-11-30 17:11:04 -05001067 bool have_sub_page = mr->subpage;
1068
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001069 memory_region_unref(mr);
1070
Don Slutz55b4e802015-11-30 17:11:04 -05001071 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001072 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001073 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001074 g_free(subpage);
1075 }
1076}
1077
Paolo Bonzini60926662013-05-29 12:30:26 +02001078static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001079{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001080 while (map->sections_nb > 0) {
1081 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001082 phys_section_destroy(section->mr);
1083 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001084 g_free(map->sections);
1085 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001086}
1087
Avi Kivityac1970f2012-10-03 16:22:53 +02001088static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001089{
1090 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001091 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001092 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001093 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001094 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001095 MemoryRegionSection subsection = {
1096 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001097 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001098 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001099 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001100
Avi Kivityf3705d52012-03-08 16:16:34 +02001101 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001102
Avi Kivityf3705d52012-03-08 16:16:34 +02001103 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001104 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001105 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001106 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001107 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001108 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001109 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001110 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001111 }
1112 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001113 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001114 subpage_register(subpage, start, end,
1115 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001116}
1117
1118
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001119static void register_multipage(AddressSpaceDispatch *d,
1120 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001121{
Avi Kivitya8170e52012-10-23 12:30:10 +02001122 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001123 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001124 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1125 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001126
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001127 assert(num_pages);
1128 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001129}
1130
Avi Kivityac1970f2012-10-03 16:22:53 +02001131static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001132{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001133 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001134 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001135 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001136 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001137
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001138 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1139 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1140 - now.offset_within_address_space;
1141
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001142 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001143 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001144 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001145 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001146 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001147 while (int128_ne(remain.size, now.size)) {
1148 remain.size = int128_sub(remain.size, now.size);
1149 remain.offset_within_address_space += int128_get64(now.size);
1150 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001151 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001152 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001153 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001154 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001155 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001156 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001157 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001158 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001159 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001160 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001161 }
1162}
1163
Sheng Yang62a27442010-01-26 19:21:16 +08001164void qemu_flush_coalesced_mmio_buffer(void)
1165{
1166 if (kvm_enabled())
1167 kvm_flush_coalesced_mmio_buffer();
1168}
1169
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001170void qemu_mutex_lock_ramlist(void)
1171{
1172 qemu_mutex_lock(&ram_list.mutex);
1173}
1174
1175void qemu_mutex_unlock_ramlist(void)
1176{
1177 qemu_mutex_unlock(&ram_list.mutex);
1178}
1179
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001180#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001181
1182#include <sys/vfs.h>
1183
1184#define HUGETLBFS_MAGIC 0x958458f6
1185
Hu Taofc7a5802014-09-09 13:28:01 +08001186static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001187{
1188 struct statfs fs;
1189 int ret;
1190
1191 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001192 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001193 } while (ret != 0 && errno == EINTR);
1194
1195 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001196 error_setg_errno(errp, errno, "failed to get page size of file %s",
1197 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001198 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199 }
1200
Marcelo Tosattic9027602010-03-01 20:25:08 -03001201 return fs.f_bsize;
1202}
1203
Alex Williamson04b16652010-07-02 11:13:17 -06001204static void *file_ram_alloc(RAMBlock *block,
1205 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001206 const char *path,
1207 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001208{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001209 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001210 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001211 char *sanitized_name;
1212 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001213 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001214 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001215 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001216 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001217
Hu Taofc7a5802014-09-09 13:28:01 +08001218 hpagesize = gethugepagesize(path, &local_err);
1219 if (local_err) {
1220 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001221 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001222 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001223 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001224
1225 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001226 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1227 "or larger than huge page size 0x%" PRIx64,
1228 memory, hpagesize);
1229 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001230 }
1231
1232 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001233 error_setg(errp,
1234 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001235 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236 }
1237
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001238 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1239 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1240 sanitized_name = g_strdup(memory_region_name(block->mr));
1241 for (c = sanitized_name; *c != '\0'; c++) {
1242 if (*c == '/') {
1243 *c = '_';
1244 }
1245 }
1246
1247 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1248 sanitized_name);
1249 g_free(sanitized_name);
1250
1251 fd = mkstemp(filename);
1252 if (fd >= 0) {
1253 unlink(filename);
1254 }
1255 g_free(filename);
1256 } else {
1257 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001258 }
1259
Marcelo Tosattic9027602010-03-01 20:25:08 -03001260 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001261 error_setg_errno(errp, errno,
1262 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001263 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001264 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001265
Chen Hanxiao9284f312015-07-24 11:12:03 +08001266 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267
1268 /*
1269 * ftruncate is not supported by hugetlbfs in older
1270 * hosts, so don't bother bailing out on errors.
1271 * If anything goes wrong with it under other filesystems,
1272 * mmap will fail.
1273 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001274 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001275 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001276 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001277
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001278 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001279 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001280 error_setg_errno(errp, errno,
1281 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001282 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001283 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001284 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001285
1286 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001287 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001288 }
1289
Alex Williamson04b16652010-07-02 11:13:17 -06001290 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001291 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001292
1293error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001294 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001295}
1296#endif
1297
Mike Day0dc3f442013-09-05 14:41:35 -04001298/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001299static ram_addr_t find_ram_offset(ram_addr_t size)
1300{
Alex Williamson04b16652010-07-02 11:13:17 -06001301 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001302 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001303
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001304 assert(size != 0); /* it would hand out same offset multiple times */
1305
Mike Day0dc3f442013-09-05 14:41:35 -04001306 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001307 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001308 }
Alex Williamson04b16652010-07-02 11:13:17 -06001309
Mike Day0dc3f442013-09-05 14:41:35 -04001310 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001311 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001312
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001313 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001314
Mike Day0dc3f442013-09-05 14:41:35 -04001315 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001316 if (next_block->offset >= end) {
1317 next = MIN(next, next_block->offset);
1318 }
1319 }
1320 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001321 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001322 mingap = next - end;
1323 }
1324 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001325
1326 if (offset == RAM_ADDR_MAX) {
1327 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1328 (uint64_t)size);
1329 abort();
1330 }
1331
Alex Williamson04b16652010-07-02 11:13:17 -06001332 return offset;
1333}
1334
Juan Quintela652d7ec2012-07-20 10:37:54 +02001335ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001336{
Alex Williamsond17b5282010-06-25 11:08:38 -06001337 RAMBlock *block;
1338 ram_addr_t last = 0;
1339
Mike Day0dc3f442013-09-05 14:41:35 -04001340 rcu_read_lock();
1341 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001342 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001343 }
Mike Day0dc3f442013-09-05 14:41:35 -04001344 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001345 return last;
1346}
1347
Jason Baronddb97f12012-08-02 15:44:16 -04001348static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1349{
1350 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001351
1352 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001353 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001354 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1355 if (ret) {
1356 perror("qemu_madvise");
1357 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1358 "but dump_guest_core=off specified\n");
1359 }
1360 }
1361}
1362
Mike Day0dc3f442013-09-05 14:41:35 -04001363/* Called within an RCU critical section, or while the ramlist lock
1364 * is held.
1365 */
Hu Tao20cfe882014-04-02 15:13:26 +08001366static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001367{
Hu Tao20cfe882014-04-02 15:13:26 +08001368 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001369
Mike Day0dc3f442013-09-05 14:41:35 -04001370 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001371 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001372 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001373 }
1374 }
Hu Tao20cfe882014-04-02 15:13:26 +08001375
1376 return NULL;
1377}
1378
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001379const char *qemu_ram_get_idstr(RAMBlock *rb)
1380{
1381 return rb->idstr;
1382}
1383
Mike Dayae3a7042013-09-05 14:41:35 -04001384/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001385void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1386{
Mike Dayae3a7042013-09-05 14:41:35 -04001387 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001388
Mike Day0dc3f442013-09-05 14:41:35 -04001389 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001390 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001391 assert(new_block);
1392 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001393
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001394 if (dev) {
1395 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001396 if (id) {
1397 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001398 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001399 }
1400 }
1401 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1402
Mike Day0dc3f442013-09-05 14:41:35 -04001403 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001404 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001405 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1406 new_block->idstr);
1407 abort();
1408 }
1409 }
Mike Day0dc3f442013-09-05 14:41:35 -04001410 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001411}
1412
Mike Dayae3a7042013-09-05 14:41:35 -04001413/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001414void qemu_ram_unset_idstr(ram_addr_t addr)
1415{
Mike Dayae3a7042013-09-05 14:41:35 -04001416 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001417
Mike Dayae3a7042013-09-05 14:41:35 -04001418 /* FIXME: arch_init.c assumes that this is not called throughout
1419 * migration. Ignore the problem since hot-unplug during migration
1420 * does not work anyway.
1421 */
1422
Mike Day0dc3f442013-09-05 14:41:35 -04001423 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001424 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001425 if (block) {
1426 memset(block->idstr, 0, sizeof(block->idstr));
1427 }
Mike Day0dc3f442013-09-05 14:41:35 -04001428 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001429}
1430
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001431static int memory_try_enable_merging(void *addr, size_t len)
1432{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001433 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001434 /* disabled by the user */
1435 return 0;
1436 }
1437
1438 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1439}
1440
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001441/* Only legal before guest might have detected the memory size: e.g. on
1442 * incoming migration, or right after reset.
1443 *
1444 * As memory core doesn't know how is memory accessed, it is up to
1445 * resize callback to update device state and/or add assertions to detect
1446 * misuse, if necessary.
1447 */
1448int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1449{
1450 RAMBlock *block = find_ram_block(base);
1451
1452 assert(block);
1453
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001454 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001455
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001456 if (block->used_length == newsize) {
1457 return 0;
1458 }
1459
1460 if (!(block->flags & RAM_RESIZEABLE)) {
1461 error_setg_errno(errp, EINVAL,
1462 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1463 " in != 0x" RAM_ADDR_FMT, block->idstr,
1464 newsize, block->used_length);
1465 return -EINVAL;
1466 }
1467
1468 if (block->max_length < newsize) {
1469 error_setg_errno(errp, EINVAL,
1470 "Length too large: %s: 0x" RAM_ADDR_FMT
1471 " > 0x" RAM_ADDR_FMT, block->idstr,
1472 newsize, block->max_length);
1473 return -EINVAL;
1474 }
1475
1476 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1477 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001478 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1479 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001480 memory_region_set_size(block->mr, newsize);
1481 if (block->resized) {
1482 block->resized(block->idstr, newsize, block->host);
1483 }
1484 return 0;
1485}
1486
Hu Taoef701d72014-09-09 13:27:54 +08001487static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001488{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001489 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001490 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001491 ram_addr_t old_ram_size, new_ram_size;
1492
1493 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001494
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001495 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001496 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001497
1498 if (!new_block->host) {
1499 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001500 xen_ram_alloc(new_block->offset, new_block->max_length,
1501 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001502 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001503 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001504 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001505 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001506 error_setg_errno(errp, errno,
1507 "cannot set up guest memory '%s'",
1508 memory_region_name(new_block->mr));
1509 qemu_mutex_unlock_ramlist();
1510 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001511 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001512 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001513 }
1514 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001515
Li Zhijiandd631692015-07-02 20:18:06 +08001516 new_ram_size = MAX(old_ram_size,
1517 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1518 if (new_ram_size > old_ram_size) {
1519 migration_bitmap_extend(old_ram_size, new_ram_size);
1520 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001521 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1522 * QLIST (which has an RCU-friendly variant) does not have insertion at
1523 * tail, so save the last element in last_block.
1524 */
Mike Day0dc3f442013-09-05 14:41:35 -04001525 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001526 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001527 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001528 break;
1529 }
1530 }
1531 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001532 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001533 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001534 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001535 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001536 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001537 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001538 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001539
Mike Day0dc3f442013-09-05 14:41:35 -04001540 /* Write list before version */
1541 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001542 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001543 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001544
Juan Quintela2152f5c2013-10-08 13:52:02 +02001545 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1546
1547 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001548 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001549
1550 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001551 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1552 ram_list.dirty_memory[i] =
1553 bitmap_zero_extend(ram_list.dirty_memory[i],
1554 old_ram_size, new_ram_size);
1555 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001556 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001557 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001558 new_block->used_length,
1559 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001560
Paolo Bonzinia904c912015-01-21 16:18:35 +01001561 if (new_block->host) {
1562 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1563 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1564 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1565 if (kvm_enabled()) {
1566 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1567 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001568 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001569
1570 return new_block->offset;
1571}
1572
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001573#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001574ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001575 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001576 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577{
1578 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001579 ram_addr_t addr;
1580 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581
1582 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001583 error_setg(errp, "-mem-path not supported with Xen");
1584 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001585 }
1586
1587 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1588 /*
1589 * file_ram_alloc() needs to allocate just like
1590 * phys_mem_alloc, but we haven't bothered to provide
1591 * a hook there.
1592 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001593 error_setg(errp,
1594 "-mem-path not supported with this accelerator");
1595 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001596 }
1597
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001598 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001599 new_block = g_malloc0(sizeof(*new_block));
1600 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001601 new_block->used_length = size;
1602 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001603 new_block->flags = share ? RAM_SHARED : 0;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001604 new_block->flags |= RAM_FILE;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001605 new_block->host = file_ram_alloc(new_block, size,
1606 mem_path, errp);
1607 if (!new_block->host) {
1608 g_free(new_block);
1609 return -1;
1610 }
1611
Hu Taoef701d72014-09-09 13:27:54 +08001612 addr = ram_block_add(new_block, &local_err);
1613 if (local_err) {
1614 g_free(new_block);
1615 error_propagate(errp, local_err);
1616 return -1;
1617 }
1618 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001619}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001620#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001621
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001622static
1623ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1624 void (*resized)(const char*,
1625 uint64_t length,
1626 void *host),
1627 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001628 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001629{
1630 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001631 ram_addr_t addr;
1632 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001633
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001634 size = HOST_PAGE_ALIGN(size);
1635 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636 new_block = g_malloc0(sizeof(*new_block));
1637 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001638 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001639 new_block->used_length = size;
1640 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001641 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001642 new_block->fd = -1;
1643 new_block->host = host;
1644 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001645 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001646 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001647 if (resizeable) {
1648 new_block->flags |= RAM_RESIZEABLE;
1649 }
Hu Taoef701d72014-09-09 13:27:54 +08001650 addr = ram_block_add(new_block, &local_err);
1651 if (local_err) {
1652 g_free(new_block);
1653 error_propagate(errp, local_err);
1654 return -1;
1655 }
1656 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001657}
1658
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001659ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1660 MemoryRegion *mr, Error **errp)
1661{
1662 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1663}
1664
Hu Taoef701d72014-09-09 13:27:54 +08001665ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001666{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001667 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1668}
1669
1670ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1671 void (*resized)(const char*,
1672 uint64_t length,
1673 void *host),
1674 MemoryRegion *mr, Error **errp)
1675{
1676 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001677}
bellarde9a1ab12007-02-08 23:08:38 +00001678
Paolo Bonzini43771532013-09-09 17:58:40 +02001679static void reclaim_ramblock(RAMBlock *block)
1680{
1681 if (block->flags & RAM_PREALLOC) {
1682 ;
1683 } else if (xen_enabled()) {
1684 xen_invalidate_map_cache_entry(block->host);
1685#ifndef _WIN32
1686 } else if (block->fd >= 0) {
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001687 if (block->flags & RAM_FILE) {
1688 qemu_ram_munmap(block->host, block->max_length);
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001689 } else {
1690 munmap(block->host, block->max_length);
1691 }
Paolo Bonzini43771532013-09-09 17:58:40 +02001692 close(block->fd);
1693#endif
1694 } else {
1695 qemu_anon_ram_free(block->host, block->max_length);
1696 }
1697 g_free(block);
1698}
1699
Anthony Liguoric227f092009-10-01 16:12:16 -05001700void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001701{
Alex Williamson04b16652010-07-02 11:13:17 -06001702 RAMBlock *block;
1703
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001704 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001705 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001706 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001707 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001708 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001709 /* Write list before version */
1710 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001711 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001712 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001713 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001714 }
1715 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001716 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001717}
1718
Huang Yingcd19cfa2011-03-02 08:56:19 +01001719#ifndef _WIN32
1720void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1721{
1722 RAMBlock *block;
1723 ram_addr_t offset;
1724 int flags;
1725 void *area, *vaddr;
1726
Mike Day0dc3f442013-09-05 14:41:35 -04001727 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001728 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001729 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001730 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001731 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001732 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001733 } else if (xen_enabled()) {
1734 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001735 } else {
1736 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001737 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001738 flags |= (block->flags & RAM_SHARED ?
1739 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001740 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1741 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001742 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001743 /*
1744 * Remap needs to match alloc. Accelerators that
1745 * set phys_mem_alloc never remap. If they did,
1746 * we'd need a remap hook here.
1747 */
1748 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1749
Huang Yingcd19cfa2011-03-02 08:56:19 +01001750 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1751 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1752 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001753 }
1754 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001755 fprintf(stderr, "Could not remap addr: "
1756 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001757 length, addr);
1758 exit(1);
1759 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001760 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001761 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001762 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001763 }
1764 }
1765}
1766#endif /* !_WIN32 */
1767
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001768int qemu_get_ram_fd(ram_addr_t addr)
1769{
Mike Dayae3a7042013-09-05 14:41:35 -04001770 RAMBlock *block;
1771 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001772
Mike Day0dc3f442013-09-05 14:41:35 -04001773 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001774 block = qemu_get_ram_block(addr);
1775 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001776 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001777 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001778}
1779
Damjan Marion3fd74b82014-06-26 23:01:32 +02001780void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1781{
Mike Dayae3a7042013-09-05 14:41:35 -04001782 RAMBlock *block;
1783 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001784
Mike Day0dc3f442013-09-05 14:41:35 -04001785 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001786 block = qemu_get_ram_block(addr);
1787 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001788 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001789 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001790}
1791
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001792/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001793 * This should not be used for general purpose DMA. Use address_space_map
1794 * or address_space_rw instead. For local memory (e.g. video ram) that the
1795 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001796 *
1797 * By the time this function returns, the returned pointer is not protected
1798 * by RCU anymore. If the caller is not within an RCU critical section and
1799 * does not hold the iothread lock, it must have other means of protecting the
1800 * pointer, such as a reference to the region that includes the incoming
1801 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001802 */
1803void *qemu_get_ram_ptr(ram_addr_t addr)
1804{
Mike Dayae3a7042013-09-05 14:41:35 -04001805 RAMBlock *block;
1806 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001807
Mike Day0dc3f442013-09-05 14:41:35 -04001808 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001809 block = qemu_get_ram_block(addr);
1810
1811 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001812 /* We need to check if the requested address is in the RAM
1813 * because we don't want to map the entire memory in QEMU.
1814 * In that case just map until the end of the page.
1815 */
1816 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001817 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001818 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001819 }
Mike Dayae3a7042013-09-05 14:41:35 -04001820
1821 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001822 }
Mike Dayae3a7042013-09-05 14:41:35 -04001823 ptr = ramblock_ptr(block, addr - block->offset);
1824
Mike Day0dc3f442013-09-05 14:41:35 -04001825unlock:
1826 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001827 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001828}
1829
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001830/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001831 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001832 *
1833 * By the time this function returns, the returned pointer is not protected
1834 * by RCU anymore. If the caller is not within an RCU critical section and
1835 * does not hold the iothread lock, it must have other means of protecting the
1836 * pointer, such as a reference to the region that includes the incoming
1837 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001838 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001839static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001840{
Mike Dayae3a7042013-09-05 14:41:35 -04001841 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001842 if (*size == 0) {
1843 return NULL;
1844 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001845 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001846 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001847 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001848 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001849 rcu_read_lock();
1850 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001851 if (addr - block->offset < block->max_length) {
1852 if (addr - block->offset + *size > block->max_length)
1853 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001854 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001855 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001856 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001857 }
1858 }
1859
1860 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1861 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001862 }
1863}
1864
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001865/*
1866 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1867 * in that RAMBlock.
1868 *
1869 * ptr: Host pointer to look up
1870 * round_offset: If true round the result offset down to a page boundary
1871 * *ram_addr: set to result ram_addr
1872 * *offset: set to result offset within the RAMBlock
1873 *
1874 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001875 *
1876 * By the time this function returns, the returned pointer is not protected
1877 * by RCU anymore. If the caller is not within an RCU critical section and
1878 * does not hold the iothread lock, it must have other means of protecting the
1879 * pointer, such as a reference to the region that includes the incoming
1880 * ram_addr_t.
1881 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001882RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1883 ram_addr_t *ram_addr,
1884 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001885{
pbrook94a6b542009-04-11 17:15:54 +00001886 RAMBlock *block;
1887 uint8_t *host = ptr;
1888
Jan Kiszka868bb332011-06-21 22:59:09 +02001889 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001890 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001891 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001892 block = qemu_get_ram_block(*ram_addr);
1893 if (block) {
1894 *offset = (host - block->host);
1895 }
Mike Day0dc3f442013-09-05 14:41:35 -04001896 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001897 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001898 }
1899
Mike Day0dc3f442013-09-05 14:41:35 -04001900 rcu_read_lock();
1901 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001902 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001903 goto found;
1904 }
1905
Mike Day0dc3f442013-09-05 14:41:35 -04001906 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001907 /* This case append when the block is not mapped. */
1908 if (block->host == NULL) {
1909 continue;
1910 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001911 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001912 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001913 }
pbrook94a6b542009-04-11 17:15:54 +00001914 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001915
Mike Day0dc3f442013-09-05 14:41:35 -04001916 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001917 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001918
1919found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001920 *offset = (host - block->host);
1921 if (round_offset) {
1922 *offset &= TARGET_PAGE_MASK;
1923 }
1924 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001925 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001926 return block;
1927}
1928
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001929/*
1930 * Finds the named RAMBlock
1931 *
1932 * name: The name of RAMBlock to find
1933 *
1934 * Returns: RAMBlock (or NULL if not found)
1935 */
1936RAMBlock *qemu_ram_block_by_name(const char *name)
1937{
1938 RAMBlock *block;
1939
1940 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1941 if (!strcmp(name, block->idstr)) {
1942 return block;
1943 }
1944 }
1945
1946 return NULL;
1947}
1948
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001949/* Some of the softmmu routines need to translate from a host pointer
1950 (typically a TLB entry) back to a ram offset. */
1951MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1952{
1953 RAMBlock *block;
1954 ram_addr_t offset; /* Not used */
1955
1956 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1957
1958 if (!block) {
1959 return NULL;
1960 }
1961
1962 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001963}
Alex Williamsonf471a172010-06-11 11:11:42 -06001964
Avi Kivitya8170e52012-10-23 12:30:10 +02001965static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001966 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001967{
Juan Quintela52159192013-10-08 12:44:04 +02001968 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001969 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001970 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001971 switch (size) {
1972 case 1:
1973 stb_p(qemu_get_ram_ptr(ram_addr), val);
1974 break;
1975 case 2:
1976 stw_p(qemu_get_ram_ptr(ram_addr), val);
1977 break;
1978 case 4:
1979 stl_p(qemu_get_ram_ptr(ram_addr), val);
1980 break;
1981 default:
1982 abort();
1983 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001984 /* Set both VGA and migration bits for simplicity and to remove
1985 * the notdirty callback faster.
1986 */
1987 cpu_physical_memory_set_dirty_range(ram_addr, size,
1988 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001989 /* we remove the notdirty callback only if the code has been
1990 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001991 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001992 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001993 }
bellard1ccde1c2004-02-06 19:46:14 +00001994}
1995
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001996static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1997 unsigned size, bool is_write)
1998{
1999 return is_write;
2000}
2001
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002002static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002003 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002004 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002005 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002006};
2007
pbrook0f459d12008-06-09 00:20:13 +00002008/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002009static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002010{
Andreas Färber93afead2013-08-26 03:41:01 +02002011 CPUState *cpu = current_cpu;
2012 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002013 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002014 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002015 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002016 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002017
Andreas Färberff4700b2013-08-26 18:23:18 +02002018 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002019 /* We re-entered the check after replacing the TB. Now raise
2020 * the debug interrupt so that is will trigger after the
2021 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002022 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002023 return;
2024 }
Andreas Färber93afead2013-08-26 03:41:01 +02002025 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002026 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002027 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2028 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002029 if (flags == BP_MEM_READ) {
2030 wp->flags |= BP_WATCHPOINT_HIT_READ;
2031 } else {
2032 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2033 }
2034 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002035 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002036 if (!cpu->watchpoint_hit) {
2037 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002038 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002039 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002040 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002041 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002042 } else {
2043 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002044 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002045 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002046 }
aliguori06d55cc2008-11-18 20:24:06 +00002047 }
aliguori6e140f22008-11-18 20:37:55 +00002048 } else {
2049 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002050 }
2051 }
2052}
2053
pbrook6658ffb2007-03-16 23:58:11 +00002054/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2055 so these check for a hit then pass through to the normal out-of-line
2056 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002057static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2058 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002059{
Peter Maydell66b9b432015-04-26 16:49:24 +01002060 MemTxResult res;
2061 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002062
Peter Maydell66b9b432015-04-26 16:49:24 +01002063 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002064 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002065 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002066 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002067 break;
2068 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002069 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002070 break;
2071 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002072 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002073 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002074 default: abort();
2075 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002076 *pdata = data;
2077 return res;
2078}
2079
2080static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2081 uint64_t val, unsigned size,
2082 MemTxAttrs attrs)
2083{
2084 MemTxResult res;
2085
2086 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2087 switch (size) {
2088 case 1:
2089 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2090 break;
2091 case 2:
2092 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2093 break;
2094 case 4:
2095 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2096 break;
2097 default: abort();
2098 }
2099 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002100}
2101
Avi Kivity1ec9b902012-01-02 12:47:48 +02002102static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002103 .read_with_attrs = watch_mem_read,
2104 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002105 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002106};
pbrook6658ffb2007-03-16 23:58:11 +00002107
Peter Maydellf25a49e2015-04-26 16:49:24 +01002108static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2109 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002110{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002111 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002112 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002113 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002114
blueswir1db7b5422007-05-26 17:36:03 +00002115#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002116 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002117 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002118#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002119 res = address_space_read(subpage->as, addr + subpage->base,
2120 attrs, buf, len);
2121 if (res) {
2122 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002123 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002124 switch (len) {
2125 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002126 *data = ldub_p(buf);
2127 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002128 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002129 *data = lduw_p(buf);
2130 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002131 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002132 *data = ldl_p(buf);
2133 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002134 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002135 *data = ldq_p(buf);
2136 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002137 default:
2138 abort();
2139 }
blueswir1db7b5422007-05-26 17:36:03 +00002140}
2141
Peter Maydellf25a49e2015-04-26 16:49:24 +01002142static MemTxResult subpage_write(void *opaque, hwaddr addr,
2143 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002144{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002145 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002146 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002147
blueswir1db7b5422007-05-26 17:36:03 +00002148#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002149 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002150 " value %"PRIx64"\n",
2151 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002152#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002153 switch (len) {
2154 case 1:
2155 stb_p(buf, value);
2156 break;
2157 case 2:
2158 stw_p(buf, value);
2159 break;
2160 case 4:
2161 stl_p(buf, value);
2162 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002163 case 8:
2164 stq_p(buf, value);
2165 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002166 default:
2167 abort();
2168 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002169 return address_space_write(subpage->as, addr + subpage->base,
2170 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002171}
2172
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002173static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002174 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002175{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002176 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002177#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002178 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002179 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002180#endif
2181
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002182 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002183 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002184}
2185
Avi Kivity70c68e42012-01-02 12:32:48 +02002186static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002187 .read_with_attrs = subpage_read,
2188 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002189 .impl.min_access_size = 1,
2190 .impl.max_access_size = 8,
2191 .valid.min_access_size = 1,
2192 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002193 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002194 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002195};
2196
Anthony Liguoric227f092009-10-01 16:12:16 -05002197static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002198 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002199{
2200 int idx, eidx;
2201
2202 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2203 return -1;
2204 idx = SUBPAGE_IDX(start);
2205 eidx = SUBPAGE_IDX(end);
2206#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002207 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2208 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002209#endif
blueswir1db7b5422007-05-26 17:36:03 +00002210 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002211 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002212 }
2213
2214 return 0;
2215}
2216
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002217static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002218{
Anthony Liguoric227f092009-10-01 16:12:16 -05002219 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002220
Anthony Liguori7267c092011-08-20 22:09:37 -05002221 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002222
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002223 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002224 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002225 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002226 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002227 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002228#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002229 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2230 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002231#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002232 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002233
2234 return mmio;
2235}
2236
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002237static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2238 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002239{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002240 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002241 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002242 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002243 .mr = mr,
2244 .offset_within_address_space = 0,
2245 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002246 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002247 };
2248
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002249 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002250}
2251
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002252MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002253{
Peter Maydell32857f42015-10-01 15:29:50 +01002254 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2255 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002256 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002257
2258 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002259}
2260
Avi Kivitye9179ce2009-06-14 11:38:52 +03002261static void io_mem_init(void)
2262{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002263 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002264 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002265 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002266 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002267 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002268 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002269 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002270}
2271
Avi Kivityac1970f2012-10-03 16:22:53 +02002272static void mem_begin(MemoryListener *listener)
2273{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002274 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002275 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2276 uint16_t n;
2277
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002278 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002279 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002280 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002281 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002282 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002283 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002284 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002285 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002286
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002287 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002288 d->as = as;
2289 as->next_dispatch = d;
2290}
2291
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002292static void address_space_dispatch_free(AddressSpaceDispatch *d)
2293{
2294 phys_sections_free(&d->map);
2295 g_free(d);
2296}
2297
Paolo Bonzini00752702013-05-29 12:13:54 +02002298static void mem_commit(MemoryListener *listener)
2299{
2300 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002301 AddressSpaceDispatch *cur = as->dispatch;
2302 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002303
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002304 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002305
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002306 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002307 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002308 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002309 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002310}
2311
Avi Kivity1d711482012-10-02 18:54:45 +02002312static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002313{
Peter Maydell32857f42015-10-01 15:29:50 +01002314 CPUAddressSpace *cpuas;
2315 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002316
2317 /* since each CPU stores ram addresses in its TLB cache, we must
2318 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002319 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2320 cpu_reloading_memory_map();
2321 /* The CPU and TLB are protected by the iothread lock.
2322 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2323 * may have split the RCU critical section.
2324 */
2325 d = atomic_rcu_read(&cpuas->as->dispatch);
2326 cpuas->memory_dispatch = d;
2327 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002328}
2329
Avi Kivityac1970f2012-10-03 16:22:53 +02002330void address_space_init_dispatch(AddressSpace *as)
2331{
Paolo Bonzini00752702013-05-29 12:13:54 +02002332 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002333 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002334 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002335 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002336 .region_add = mem_add,
2337 .region_nop = mem_add,
2338 .priority = 0,
2339 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002340 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002341}
2342
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002343void address_space_unregister(AddressSpace *as)
2344{
2345 memory_listener_unregister(&as->dispatch_listener);
2346}
2347
Avi Kivity83f3c252012-10-07 12:59:55 +02002348void address_space_destroy_dispatch(AddressSpace *as)
2349{
2350 AddressSpaceDispatch *d = as->dispatch;
2351
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002352 atomic_rcu_set(&as->dispatch, NULL);
2353 if (d) {
2354 call_rcu(d, address_space_dispatch_free, rcu);
2355 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002356}
2357
Avi Kivity62152b82011-07-26 14:26:14 +03002358static void memory_map_init(void)
2359{
Anthony Liguori7267c092011-08-20 22:09:37 -05002360 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002361
Paolo Bonzini57271d62013-11-07 17:14:37 +01002362 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002363 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002364
Anthony Liguori7267c092011-08-20 22:09:37 -05002365 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002366 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2367 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002368 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002369}
2370
2371MemoryRegion *get_system_memory(void)
2372{
2373 return system_memory;
2374}
2375
Avi Kivity309cb472011-08-08 16:09:03 +03002376MemoryRegion *get_system_io(void)
2377{
2378 return system_io;
2379}
2380
pbrooke2eef172008-06-08 01:09:01 +00002381#endif /* !defined(CONFIG_USER_ONLY) */
2382
bellard13eb76e2004-01-24 15:23:36 +00002383/* physical memory access (slow version, mainly for debug) */
2384#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002385int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002386 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002387{
2388 int l, flags;
2389 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002390 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002391
2392 while (len > 0) {
2393 page = addr & TARGET_PAGE_MASK;
2394 l = (page + TARGET_PAGE_SIZE) - addr;
2395 if (l > len)
2396 l = len;
2397 flags = page_get_flags(page);
2398 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002399 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002400 if (is_write) {
2401 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002402 return -1;
bellard579a97f2007-11-11 14:26:47 +00002403 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002404 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002405 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002406 memcpy(p, buf, l);
2407 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002408 } else {
2409 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002410 return -1;
bellard579a97f2007-11-11 14:26:47 +00002411 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002412 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002413 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002414 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002415 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002416 }
2417 len -= l;
2418 buf += l;
2419 addr += l;
2420 }
Paul Brooka68fe892010-03-01 00:08:59 +00002421 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002422}
bellard8df1cd02005-01-28 22:37:22 +00002423
bellard13eb76e2004-01-24 15:23:36 +00002424#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002425
Paolo Bonzini845b6212015-03-23 11:45:53 +01002426static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002427 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002428{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002429 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2430 /* No early return if dirty_log_mask is or becomes 0, because
2431 * cpu_physical_memory_set_dirty_range will still call
2432 * xen_modified_memory.
2433 */
2434 if (dirty_log_mask) {
2435 dirty_log_mask =
2436 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002437 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002438 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2439 tb_invalidate_phys_range(addr, addr + length);
2440 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2441 }
2442 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002443}
2444
Richard Henderson23326162013-07-08 14:55:59 -07002445static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002446{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002447 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002448
2449 /* Regions are assumed to support 1-4 byte accesses unless
2450 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002451 if (access_size_max == 0) {
2452 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002453 }
Richard Henderson23326162013-07-08 14:55:59 -07002454
2455 /* Bound the maximum access by the alignment of the address. */
2456 if (!mr->ops->impl.unaligned) {
2457 unsigned align_size_max = addr & -addr;
2458 if (align_size_max != 0 && align_size_max < access_size_max) {
2459 access_size_max = align_size_max;
2460 }
2461 }
2462
2463 /* Don't attempt accesses larger than the maximum. */
2464 if (l > access_size_max) {
2465 l = access_size_max;
2466 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002467 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002468
2469 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002470}
2471
Jan Kiszka4840f102015-06-18 18:47:22 +02002472static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002473{
Jan Kiszka4840f102015-06-18 18:47:22 +02002474 bool unlocked = !qemu_mutex_iothread_locked();
2475 bool release_lock = false;
2476
2477 if (unlocked && mr->global_locking) {
2478 qemu_mutex_lock_iothread();
2479 unlocked = false;
2480 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002481 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002482 if (mr->flush_coalesced_mmio) {
2483 if (unlocked) {
2484 qemu_mutex_lock_iothread();
2485 }
2486 qemu_flush_coalesced_mmio_buffer();
2487 if (unlocked) {
2488 qemu_mutex_unlock_iothread();
2489 }
2490 }
2491
2492 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002493}
2494
Peter Maydell5c9eb022015-04-26 16:49:24 +01002495MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2496 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002497{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002498 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002499 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002500 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002501 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002502 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002503 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002504 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002505
Paolo Bonzini41063e12015-03-18 14:21:43 +01002506 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002507 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002508 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002509 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002510
bellard13eb76e2004-01-24 15:23:36 +00002511 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002512 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002513 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002514 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002515 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002516 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002517 switch (l) {
2518 case 8:
2519 /* 64 bit write access */
2520 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002521 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2522 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002523 break;
2524 case 4:
bellard1c213d12005-09-03 10:49:04 +00002525 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002526 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002527 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2528 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002529 break;
2530 case 2:
bellard1c213d12005-09-03 10:49:04 +00002531 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002532 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002533 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2534 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002535 break;
2536 case 1:
bellard1c213d12005-09-03 10:49:04 +00002537 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002538 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002539 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2540 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002541 break;
2542 default:
2543 abort();
bellard13eb76e2004-01-24 15:23:36 +00002544 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002545 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002546 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002547 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002548 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002549 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002550 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002551 }
2552 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002553 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002554 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002555 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002556 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002557 switch (l) {
2558 case 8:
2559 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002560 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2561 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002562 stq_p(buf, val);
2563 break;
2564 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002565 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002566 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2567 attrs);
bellardc27004e2005-01-03 23:35:10 +00002568 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002569 break;
2570 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002571 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002572 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2573 attrs);
bellardc27004e2005-01-03 23:35:10 +00002574 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002575 break;
2576 case 1:
bellard1c213d12005-09-03 10:49:04 +00002577 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002578 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2579 attrs);
bellardc27004e2005-01-03 23:35:10 +00002580 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002581 break;
2582 default:
2583 abort();
bellard13eb76e2004-01-24 15:23:36 +00002584 }
2585 } else {
2586 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002587 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002588 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002589 }
2590 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002591
2592 if (release_lock) {
2593 qemu_mutex_unlock_iothread();
2594 release_lock = false;
2595 }
2596
bellard13eb76e2004-01-24 15:23:36 +00002597 len -= l;
2598 buf += l;
2599 addr += l;
2600 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002601 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002602
Peter Maydell3b643492015-04-26 16:49:23 +01002603 return result;
bellard13eb76e2004-01-24 15:23:36 +00002604}
bellard8df1cd02005-01-28 22:37:22 +00002605
Peter Maydell5c9eb022015-04-26 16:49:24 +01002606MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2607 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002608{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002609 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002610}
2611
Peter Maydell5c9eb022015-04-26 16:49:24 +01002612MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2613 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002614{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002615 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002616}
2617
2618
Avi Kivitya8170e52012-10-23 12:30:10 +02002619void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002620 int len, int is_write)
2621{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002622 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2623 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002624}
2625
Alexander Graf582b55a2013-12-11 14:17:44 +01002626enum write_rom_type {
2627 WRITE_DATA,
2628 FLUSH_CACHE,
2629};
2630
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002631static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002632 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002633{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002634 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002635 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002636 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002637 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002638
Paolo Bonzini41063e12015-03-18 14:21:43 +01002639 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002640 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002641 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002642 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002643
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002644 if (!(memory_region_is_ram(mr) ||
2645 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002646 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002647 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002648 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002649 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002650 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002651 switch (type) {
2652 case WRITE_DATA:
2653 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002654 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002655 break;
2656 case FLUSH_CACHE:
2657 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2658 break;
2659 }
bellardd0ecd2a2006-04-23 17:14:48 +00002660 }
2661 len -= l;
2662 buf += l;
2663 addr += l;
2664 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002665 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002666}
2667
Alexander Graf582b55a2013-12-11 14:17:44 +01002668/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002669void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002670 const uint8_t *buf, int len)
2671{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002672 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002673}
2674
2675void cpu_flush_icache_range(hwaddr start, int len)
2676{
2677 /*
2678 * This function should do the same thing as an icache flush that was
2679 * triggered from within the guest. For TCG we are always cache coherent,
2680 * so there is no need to flush anything. For KVM / Xen we need to flush
2681 * the host's instruction cache at least.
2682 */
2683 if (tcg_enabled()) {
2684 return;
2685 }
2686
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002687 cpu_physical_memory_write_rom_internal(&address_space_memory,
2688 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002689}
2690
aliguori6d16c2f2009-01-22 16:59:11 +00002691typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002692 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002693 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002694 hwaddr addr;
2695 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002696 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002697} BounceBuffer;
2698
2699static BounceBuffer bounce;
2700
aliguoriba223c22009-01-22 16:59:16 +00002701typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002702 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002703 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002704} MapClient;
2705
Fam Zheng38e047b2015-03-16 17:03:35 +08002706QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002707static QLIST_HEAD(map_client_list, MapClient) map_client_list
2708 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002709
Fam Zhenge95205e2015-03-16 17:03:37 +08002710static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002711{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002712 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002713 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002714}
2715
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002716static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002717{
2718 MapClient *client;
2719
Blue Swirl72cf2d42009-09-12 07:36:22 +00002720 while (!QLIST_EMPTY(&map_client_list)) {
2721 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002722 qemu_bh_schedule(client->bh);
2723 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002724 }
2725}
2726
Fam Zhenge95205e2015-03-16 17:03:37 +08002727void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002728{
2729 MapClient *client = g_malloc(sizeof(*client));
2730
Fam Zheng38e047b2015-03-16 17:03:35 +08002731 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002732 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002733 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002734 if (!atomic_read(&bounce.in_use)) {
2735 cpu_notify_map_clients_locked();
2736 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002737 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002738}
2739
Fam Zheng38e047b2015-03-16 17:03:35 +08002740void cpu_exec_init_all(void)
2741{
2742 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002743 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002744 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002745 qemu_mutex_init(&map_client_list_lock);
2746}
2747
Fam Zhenge95205e2015-03-16 17:03:37 +08002748void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002749{
Fam Zhenge95205e2015-03-16 17:03:37 +08002750 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002751
Fam Zhenge95205e2015-03-16 17:03:37 +08002752 qemu_mutex_lock(&map_client_list_lock);
2753 QLIST_FOREACH(client, &map_client_list, link) {
2754 if (client->bh == bh) {
2755 cpu_unregister_map_client_do(client);
2756 break;
2757 }
2758 }
2759 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002760}
2761
2762static void cpu_notify_map_clients(void)
2763{
Fam Zheng38e047b2015-03-16 17:03:35 +08002764 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002765 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002766 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002767}
2768
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002769bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2770{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002771 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002772 hwaddr l, xlat;
2773
Paolo Bonzini41063e12015-03-18 14:21:43 +01002774 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002775 while (len > 0) {
2776 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002777 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2778 if (!memory_access_is_direct(mr, is_write)) {
2779 l = memory_access_size(mr, l, addr);
2780 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002781 return false;
2782 }
2783 }
2784
2785 len -= l;
2786 addr += l;
2787 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002788 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002789 return true;
2790}
2791
aliguori6d16c2f2009-01-22 16:59:11 +00002792/* Map a physical memory region into a host virtual address.
2793 * May map a subset of the requested range, given by and returned in *plen.
2794 * May return NULL if resources needed to perform the mapping are exhausted.
2795 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002796 * Use cpu_register_map_client() to know when retrying the map operation is
2797 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002798 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002799void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002800 hwaddr addr,
2801 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002802 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002803{
Avi Kivitya8170e52012-10-23 12:30:10 +02002804 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002805 hwaddr done = 0;
2806 hwaddr l, xlat, base;
2807 MemoryRegion *mr, *this_mr;
2808 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002809
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002810 if (len == 0) {
2811 return NULL;
2812 }
aliguori6d16c2f2009-01-22 16:59:11 +00002813
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002814 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002815 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002816 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002817
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002818 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002819 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002820 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002821 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002822 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002823 /* Avoid unbounded allocations */
2824 l = MIN(l, TARGET_PAGE_SIZE);
2825 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002826 bounce.addr = addr;
2827 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002828
2829 memory_region_ref(mr);
2830 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002831 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002832 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2833 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002834 }
aliguori6d16c2f2009-01-22 16:59:11 +00002835
Paolo Bonzini41063e12015-03-18 14:21:43 +01002836 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002837 *plen = l;
2838 return bounce.buffer;
2839 }
2840
2841 base = xlat;
2842 raddr = memory_region_get_ram_addr(mr);
2843
2844 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002845 len -= l;
2846 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002847 done += l;
2848 if (len == 0) {
2849 break;
2850 }
2851
2852 l = len;
2853 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2854 if (this_mr != mr || xlat != base + done) {
2855 break;
2856 }
aliguori6d16c2f2009-01-22 16:59:11 +00002857 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002858
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002859 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002860 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002861 *plen = done;
2862 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002863}
2864
Avi Kivityac1970f2012-10-03 16:22:53 +02002865/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002866 * Will also mark the memory as dirty if is_write == 1. access_len gives
2867 * the amount of memory that was actually read or written by the caller.
2868 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002869void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2870 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002871{
2872 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002873 MemoryRegion *mr;
2874 ram_addr_t addr1;
2875
2876 mr = qemu_ram_addr_from_host(buffer, &addr1);
2877 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002878 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002879 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002880 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002881 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002882 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002883 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002884 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002885 return;
2886 }
2887 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002888 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2889 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002890 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002891 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002892 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002893 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002894 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002895 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002896}
bellardd0ecd2a2006-04-23 17:14:48 +00002897
Avi Kivitya8170e52012-10-23 12:30:10 +02002898void *cpu_physical_memory_map(hwaddr addr,
2899 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002900 int is_write)
2901{
2902 return address_space_map(&address_space_memory, addr, plen, is_write);
2903}
2904
Avi Kivitya8170e52012-10-23 12:30:10 +02002905void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2906 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002907{
2908 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2909}
2910
bellard8df1cd02005-01-28 22:37:22 +00002911/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002912static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2913 MemTxAttrs attrs,
2914 MemTxResult *result,
2915 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002916{
bellard8df1cd02005-01-28 22:37:22 +00002917 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002918 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002919 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002920 hwaddr l = 4;
2921 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002922 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002923 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002924
Paolo Bonzini41063e12015-03-18 14:21:43 +01002925 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002926 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002927 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002928 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002929
bellard8df1cd02005-01-28 22:37:22 +00002930 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002931 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002932#if defined(TARGET_WORDS_BIGENDIAN)
2933 if (endian == DEVICE_LITTLE_ENDIAN) {
2934 val = bswap32(val);
2935 }
2936#else
2937 if (endian == DEVICE_BIG_ENDIAN) {
2938 val = bswap32(val);
2939 }
2940#endif
bellard8df1cd02005-01-28 22:37:22 +00002941 } else {
2942 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002943 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002944 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002945 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002946 switch (endian) {
2947 case DEVICE_LITTLE_ENDIAN:
2948 val = ldl_le_p(ptr);
2949 break;
2950 case DEVICE_BIG_ENDIAN:
2951 val = ldl_be_p(ptr);
2952 break;
2953 default:
2954 val = ldl_p(ptr);
2955 break;
2956 }
Peter Maydell50013112015-04-26 16:49:24 +01002957 r = MEMTX_OK;
2958 }
2959 if (result) {
2960 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002961 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002962 if (release_lock) {
2963 qemu_mutex_unlock_iothread();
2964 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002965 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002966 return val;
2967}
2968
Peter Maydell50013112015-04-26 16:49:24 +01002969uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2970 MemTxAttrs attrs, MemTxResult *result)
2971{
2972 return address_space_ldl_internal(as, addr, attrs, result,
2973 DEVICE_NATIVE_ENDIAN);
2974}
2975
2976uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2977 MemTxAttrs attrs, MemTxResult *result)
2978{
2979 return address_space_ldl_internal(as, addr, attrs, result,
2980 DEVICE_LITTLE_ENDIAN);
2981}
2982
2983uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2984 MemTxAttrs attrs, MemTxResult *result)
2985{
2986 return address_space_ldl_internal(as, addr, attrs, result,
2987 DEVICE_BIG_ENDIAN);
2988}
2989
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002990uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002991{
Peter Maydell50013112015-04-26 16:49:24 +01002992 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002993}
2994
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002995uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002996{
Peter Maydell50013112015-04-26 16:49:24 +01002997 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002998}
2999
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003000uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003001{
Peter Maydell50013112015-04-26 16:49:24 +01003002 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003003}
3004
bellard84b7b8e2005-11-28 21:19:04 +00003005/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003006static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3007 MemTxAttrs attrs,
3008 MemTxResult *result,
3009 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003010{
bellard84b7b8e2005-11-28 21:19:04 +00003011 uint8_t *ptr;
3012 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003013 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003014 hwaddr l = 8;
3015 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003016 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003017 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003018
Paolo Bonzini41063e12015-03-18 14:21:43 +01003019 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003020 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003021 false);
3022 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003023 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003024
bellard84b7b8e2005-11-28 21:19:04 +00003025 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003026 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003027#if defined(TARGET_WORDS_BIGENDIAN)
3028 if (endian == DEVICE_LITTLE_ENDIAN) {
3029 val = bswap64(val);
3030 }
3031#else
3032 if (endian == DEVICE_BIG_ENDIAN) {
3033 val = bswap64(val);
3034 }
3035#endif
bellard84b7b8e2005-11-28 21:19:04 +00003036 } else {
3037 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003038 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003039 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003040 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003041 switch (endian) {
3042 case DEVICE_LITTLE_ENDIAN:
3043 val = ldq_le_p(ptr);
3044 break;
3045 case DEVICE_BIG_ENDIAN:
3046 val = ldq_be_p(ptr);
3047 break;
3048 default:
3049 val = ldq_p(ptr);
3050 break;
3051 }
Peter Maydell50013112015-04-26 16:49:24 +01003052 r = MEMTX_OK;
3053 }
3054 if (result) {
3055 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003056 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003057 if (release_lock) {
3058 qemu_mutex_unlock_iothread();
3059 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003060 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003061 return val;
3062}
3063
Peter Maydell50013112015-04-26 16:49:24 +01003064uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3065 MemTxAttrs attrs, MemTxResult *result)
3066{
3067 return address_space_ldq_internal(as, addr, attrs, result,
3068 DEVICE_NATIVE_ENDIAN);
3069}
3070
3071uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3072 MemTxAttrs attrs, MemTxResult *result)
3073{
3074 return address_space_ldq_internal(as, addr, attrs, result,
3075 DEVICE_LITTLE_ENDIAN);
3076}
3077
3078uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3079 MemTxAttrs attrs, MemTxResult *result)
3080{
3081 return address_space_ldq_internal(as, addr, attrs, result,
3082 DEVICE_BIG_ENDIAN);
3083}
3084
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003085uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003086{
Peter Maydell50013112015-04-26 16:49:24 +01003087 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003088}
3089
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003090uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003091{
Peter Maydell50013112015-04-26 16:49:24 +01003092 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003093}
3094
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003095uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003096{
Peter Maydell50013112015-04-26 16:49:24 +01003097 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003098}
3099
bellardaab33092005-10-30 20:48:42 +00003100/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003101uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3102 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003103{
3104 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003105 MemTxResult r;
3106
3107 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3108 if (result) {
3109 *result = r;
3110 }
bellardaab33092005-10-30 20:48:42 +00003111 return val;
3112}
3113
Peter Maydell50013112015-04-26 16:49:24 +01003114uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3115{
3116 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3117}
3118
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003119/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003120static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3121 hwaddr addr,
3122 MemTxAttrs attrs,
3123 MemTxResult *result,
3124 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003125{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003126 uint8_t *ptr;
3127 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003128 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003129 hwaddr l = 2;
3130 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003131 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003132 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003133
Paolo Bonzini41063e12015-03-18 14:21:43 +01003134 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003135 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003136 false);
3137 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003138 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003139
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003140 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003141 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003142#if defined(TARGET_WORDS_BIGENDIAN)
3143 if (endian == DEVICE_LITTLE_ENDIAN) {
3144 val = bswap16(val);
3145 }
3146#else
3147 if (endian == DEVICE_BIG_ENDIAN) {
3148 val = bswap16(val);
3149 }
3150#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003151 } else {
3152 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003153 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003154 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003155 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003156 switch (endian) {
3157 case DEVICE_LITTLE_ENDIAN:
3158 val = lduw_le_p(ptr);
3159 break;
3160 case DEVICE_BIG_ENDIAN:
3161 val = lduw_be_p(ptr);
3162 break;
3163 default:
3164 val = lduw_p(ptr);
3165 break;
3166 }
Peter Maydell50013112015-04-26 16:49:24 +01003167 r = MEMTX_OK;
3168 }
3169 if (result) {
3170 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003171 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003172 if (release_lock) {
3173 qemu_mutex_unlock_iothread();
3174 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003175 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003176 return val;
bellardaab33092005-10-30 20:48:42 +00003177}
3178
Peter Maydell50013112015-04-26 16:49:24 +01003179uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3180 MemTxAttrs attrs, MemTxResult *result)
3181{
3182 return address_space_lduw_internal(as, addr, attrs, result,
3183 DEVICE_NATIVE_ENDIAN);
3184}
3185
3186uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3187 MemTxAttrs attrs, MemTxResult *result)
3188{
3189 return address_space_lduw_internal(as, addr, attrs, result,
3190 DEVICE_LITTLE_ENDIAN);
3191}
3192
3193uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3194 MemTxAttrs attrs, MemTxResult *result)
3195{
3196 return address_space_lduw_internal(as, addr, attrs, result,
3197 DEVICE_BIG_ENDIAN);
3198}
3199
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003200uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003201{
Peter Maydell50013112015-04-26 16:49:24 +01003202 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003203}
3204
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003205uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003206{
Peter Maydell50013112015-04-26 16:49:24 +01003207 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003208}
3209
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003210uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003211{
Peter Maydell50013112015-04-26 16:49:24 +01003212 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003213}
3214
bellard8df1cd02005-01-28 22:37:22 +00003215/* warning: addr must be aligned. The ram page is not masked as dirty
3216 and the code inside is not invalidated. It is useful if the dirty
3217 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003218void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3219 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003220{
bellard8df1cd02005-01-28 22:37:22 +00003221 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003222 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003223 hwaddr l = 4;
3224 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003225 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003226 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003227 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003228
Paolo Bonzini41063e12015-03-18 14:21:43 +01003229 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003230 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003231 true);
3232 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003233 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003234
Peter Maydell50013112015-04-26 16:49:24 +01003235 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003236 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003237 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003238 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003239 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003240
Paolo Bonzini845b6212015-03-23 11:45:53 +01003241 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3242 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003243 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003244 r = MEMTX_OK;
3245 }
3246 if (result) {
3247 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003248 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003249 if (release_lock) {
3250 qemu_mutex_unlock_iothread();
3251 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003252 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003253}
3254
Peter Maydell50013112015-04-26 16:49:24 +01003255void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3256{
3257 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3258}
3259
bellard8df1cd02005-01-28 22:37:22 +00003260/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003261static inline void address_space_stl_internal(AddressSpace *as,
3262 hwaddr addr, uint32_t val,
3263 MemTxAttrs attrs,
3264 MemTxResult *result,
3265 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003266{
bellard8df1cd02005-01-28 22:37:22 +00003267 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003268 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003269 hwaddr l = 4;
3270 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003271 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003272 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003273
Paolo Bonzini41063e12015-03-18 14:21:43 +01003274 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003275 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003276 true);
3277 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003278 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003279
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003280#if defined(TARGET_WORDS_BIGENDIAN)
3281 if (endian == DEVICE_LITTLE_ENDIAN) {
3282 val = bswap32(val);
3283 }
3284#else
3285 if (endian == DEVICE_BIG_ENDIAN) {
3286 val = bswap32(val);
3287 }
3288#endif
Peter Maydell50013112015-04-26 16:49:24 +01003289 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003290 } else {
bellard8df1cd02005-01-28 22:37:22 +00003291 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003292 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003293 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003294 switch (endian) {
3295 case DEVICE_LITTLE_ENDIAN:
3296 stl_le_p(ptr, val);
3297 break;
3298 case DEVICE_BIG_ENDIAN:
3299 stl_be_p(ptr, val);
3300 break;
3301 default:
3302 stl_p(ptr, val);
3303 break;
3304 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003305 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003306 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003307 }
Peter Maydell50013112015-04-26 16:49:24 +01003308 if (result) {
3309 *result = r;
3310 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003311 if (release_lock) {
3312 qemu_mutex_unlock_iothread();
3313 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003314 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003315}
3316
3317void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3318 MemTxAttrs attrs, MemTxResult *result)
3319{
3320 address_space_stl_internal(as, addr, val, attrs, result,
3321 DEVICE_NATIVE_ENDIAN);
3322}
3323
3324void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3325 MemTxAttrs attrs, MemTxResult *result)
3326{
3327 address_space_stl_internal(as, addr, val, attrs, result,
3328 DEVICE_LITTLE_ENDIAN);
3329}
3330
3331void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3332 MemTxAttrs attrs, MemTxResult *result)
3333{
3334 address_space_stl_internal(as, addr, val, attrs, result,
3335 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003336}
3337
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003338void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003339{
Peter Maydell50013112015-04-26 16:49:24 +01003340 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003341}
3342
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003343void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003344{
Peter Maydell50013112015-04-26 16:49:24 +01003345 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003346}
3347
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003348void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003349{
Peter Maydell50013112015-04-26 16:49:24 +01003350 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003351}
3352
bellardaab33092005-10-30 20:48:42 +00003353/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003354void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3355 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003356{
3357 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003358 MemTxResult r;
3359
3360 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3361 if (result) {
3362 *result = r;
3363 }
3364}
3365
3366void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3367{
3368 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003369}
3370
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003371/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003372static inline void address_space_stw_internal(AddressSpace *as,
3373 hwaddr addr, uint32_t val,
3374 MemTxAttrs attrs,
3375 MemTxResult *result,
3376 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003377{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003378 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003379 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003380 hwaddr l = 2;
3381 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003382 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003383 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003384
Paolo Bonzini41063e12015-03-18 14:21:43 +01003385 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003386 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003387 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003388 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003389
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003390#if defined(TARGET_WORDS_BIGENDIAN)
3391 if (endian == DEVICE_LITTLE_ENDIAN) {
3392 val = bswap16(val);
3393 }
3394#else
3395 if (endian == DEVICE_BIG_ENDIAN) {
3396 val = bswap16(val);
3397 }
3398#endif
Peter Maydell50013112015-04-26 16:49:24 +01003399 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003400 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003401 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003402 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003403 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003404 switch (endian) {
3405 case DEVICE_LITTLE_ENDIAN:
3406 stw_le_p(ptr, val);
3407 break;
3408 case DEVICE_BIG_ENDIAN:
3409 stw_be_p(ptr, val);
3410 break;
3411 default:
3412 stw_p(ptr, val);
3413 break;
3414 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003415 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003416 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003417 }
Peter Maydell50013112015-04-26 16:49:24 +01003418 if (result) {
3419 *result = r;
3420 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003421 if (release_lock) {
3422 qemu_mutex_unlock_iothread();
3423 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003424 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003425}
3426
3427void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3428 MemTxAttrs attrs, MemTxResult *result)
3429{
3430 address_space_stw_internal(as, addr, val, attrs, result,
3431 DEVICE_NATIVE_ENDIAN);
3432}
3433
3434void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3435 MemTxAttrs attrs, MemTxResult *result)
3436{
3437 address_space_stw_internal(as, addr, val, attrs, result,
3438 DEVICE_LITTLE_ENDIAN);
3439}
3440
3441void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3442 MemTxAttrs attrs, MemTxResult *result)
3443{
3444 address_space_stw_internal(as, addr, val, attrs, result,
3445 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003446}
3447
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003448void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003449{
Peter Maydell50013112015-04-26 16:49:24 +01003450 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003451}
3452
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003453void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003454{
Peter Maydell50013112015-04-26 16:49:24 +01003455 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003456}
3457
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003458void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003459{
Peter Maydell50013112015-04-26 16:49:24 +01003460 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003461}
3462
bellardaab33092005-10-30 20:48:42 +00003463/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003464void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3465 MemTxAttrs attrs, MemTxResult *result)
3466{
3467 MemTxResult r;
3468 val = tswap64(val);
3469 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3470 if (result) {
3471 *result = r;
3472 }
3473}
3474
3475void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3476 MemTxAttrs attrs, MemTxResult *result)
3477{
3478 MemTxResult r;
3479 val = cpu_to_le64(val);
3480 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3481 if (result) {
3482 *result = r;
3483 }
3484}
3485void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3486 MemTxAttrs attrs, MemTxResult *result)
3487{
3488 MemTxResult r;
3489 val = cpu_to_be64(val);
3490 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3491 if (result) {
3492 *result = r;
3493 }
3494}
3495
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003496void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003497{
Peter Maydell50013112015-04-26 16:49:24 +01003498 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003499}
3500
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003501void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003502{
Peter Maydell50013112015-04-26 16:49:24 +01003503 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003504}
3505
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003506void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003507{
Peter Maydell50013112015-04-26 16:49:24 +01003508 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003509}
3510
aliguori5e2972f2009-03-28 17:51:36 +00003511/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003512int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003513 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003514{
3515 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003516 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003517 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003518
3519 while (len > 0) {
3520 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003521 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003522 /* if no physical page mapped, return an error */
3523 if (phys_addr == -1)
3524 return -1;
3525 l = (page + TARGET_PAGE_SIZE) - addr;
3526 if (l > len)
3527 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003528 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003529 if (is_write) {
3530 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3531 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003532 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3533 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003534 }
bellard13eb76e2004-01-24 15:23:36 +00003535 len -= l;
3536 buf += l;
3537 addr += l;
3538 }
3539 return 0;
3540}
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003541
3542/*
3543 * Allows code that needs to deal with migration bitmaps etc to still be built
3544 * target independent.
3545 */
3546size_t qemu_target_page_bits(void)
3547{
3548 return TARGET_PAGE_BITS;
3549}
3550
Paul Brooka68fe892010-03-01 00:08:59 +00003551#endif
bellard13eb76e2004-01-24 15:23:36 +00003552
Blue Swirl8e4a4242013-01-06 18:30:17 +00003553/*
3554 * A helper function for the _utterly broken_ virtio device model to find out if
3555 * it's running on a big endian machine. Don't do this at home kids!
3556 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003557bool target_words_bigendian(void);
3558bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003559{
3560#if defined(TARGET_WORDS_BIGENDIAN)
3561 return true;
3562#else
3563 return false;
3564#endif
3565}
3566
Wen Congyang76f35532012-05-07 12:04:18 +08003567#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003568bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003569{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003570 MemoryRegion*mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003571 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003572 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003573
Paolo Bonzini41063e12015-03-18 14:21:43 +01003574 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003575 mr = address_space_translate(&address_space_memory,
3576 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003577
Paolo Bonzini41063e12015-03-18 14:21:43 +01003578 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3579 rcu_read_unlock();
3580 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003581}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003582
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003583int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003584{
3585 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003586 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003587
Mike Day0dc3f442013-09-05 14:41:35 -04003588 rcu_read_lock();
3589 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003590 ret = func(block->idstr, block->host, block->offset,
3591 block->used_length, opaque);
3592 if (ret) {
3593 break;
3594 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003595 }
Mike Day0dc3f442013-09-05 14:41:35 -04003596 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003597 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003598}
Peter Maydellec3f8c92013-06-27 20:53:38 +01003599#endif