/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file.
 */
#define RAM_FILE (1 << 3)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

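/* Ensure the node array has room for at least @nodes more entries,
 * growing the allocation geometrically when necessary.
 */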
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

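/* Allocate a new node and initialize every entry in it, either as an
 * unassigned leaf or as an empty non-leaf entry.  Returns the index of
 * the new node within map->nodes.
 */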
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

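/* Fill one level of the page table: ranges that are large enough and
 * suitably aligned become leaves at this level, everything else recurses
 * into the next level down.
 */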
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

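/* Point @nb pages starting at @index at the given section leaf. */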
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

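/* Compact the whole dispatch tree, starting from the root entry. */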
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

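/* Walk the multi-level map for @addr, honouring the skip counts installed
 * by phys_page_compact(), and return the matching section, or the
 * unassigned section if nothing covers the address.
 */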
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

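/* Return true if the access can be made directly to guest RAM (or ROM,
 * for reads) instead of going through MMIO dispatch.
 */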
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

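/* Return the CPU with the given index, or NULL if there is none. */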
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

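/* Allocate the lowest free cpu_index from the bitmap; sets an error and
 * returns -1 once the configured maximum number of CPUs is reached.
 */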
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

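/* Register a freshly created CPU: assign its cpu_index, link it into the
 * global CPU list and register its VMState for migration.
 */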
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

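/* Invalidate the translated code containing @pc so it is regenerated with
 * the new breakpoint state.
 */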
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

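/* Re-protect a range of guest RAM in every CPU's TLB so that subsequent
 * writes are trapped and the pages are marked dirty again.
 */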
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


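/* Register a page-aligned section that spans one or more whole target pages. */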
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

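/* MemoryListener add callback: split the incoming section into a leading
 * subpage fragment, whole pages and a trailing subpage fragment, and
 * register each piece in the new dispatch map.
 */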
Avi Kivityac1970f2012-10-03 16:22:53 +02001129static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001130{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001131 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001132 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001133 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001134 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001135
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001136 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1137 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1138 - now.offset_within_address_space;
1139
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001140 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001141 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001142 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001144 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001145 while (int128_ne(remain.size, now.size)) {
1146 remain.size = int128_sub(remain.size, now.size);
1147 remain.offset_within_address_space += int128_get64(now.size);
1148 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001149 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001150 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001151 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001152 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001153 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001154 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001155 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001156 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001157 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001158 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001159 }
1160}
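
/*
 * Illustrative walk-through of mem_add() above: with 4 KiB target pages,
 * a section covering guest-physical [0x1800, 0x5400) is registered as
 *   - a subpage entry for the unaligned head   [0x1800, 0x2000),
 *   - a multipage entry for the aligned middle [0x2000, 0x5000),
 *   - a subpage entry for the unaligned tail   [0x5000, 0x5400).
 * The addresses are made up for the example; only the head/middle/tail
 * split mirrors what the loop above actually does.
 */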

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    struct stat st;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(memory_region_name(block->mr));
        for (c = sanitized_name; *c != '\0'; c++) {
            if (*c == '/') {
                *c = '_';
            }
        }

        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                   sanitized_name);
        g_free(sanitized_name);

        fd = mkstemp(filename);
        if (fd >= 0) {
            unlink(filename);
        }
        g_free(filename);
    } else {
        fd = open(path, O_RDWR | O_CREAT, 0644);
    }

    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        goto error;
    }

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    return NULL;
}
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
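
/*
 * Hypothetical usage sketch: a device model that allocated its block with
 * RAM_RESIZEABLE could grow it on incoming migration roughly like this:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_offset, 2 * old_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * "block_offset" and "old_size" are placeholder names for the ram_addr_t
 * returned at allocation time and the previous used length.
 */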

static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->flags |= RAM_FILE;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
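
/*
 * Hypothetical usage sketch: the resizeable variant pairs with
 * qemu_ram_resize() above, e.g.
 *
 *     addr = qemu_ram_alloc_resizeable(16 * 1024 * 1024, 64 * 1024 * 1024,
 *                                      my_resized_cb, mr, &err);
 *
 * where "my_resized_cb" is a made-up callback matching the resized()
 * signature and the 16/64 MiB figures are arbitrary initial/maximum sizes;
 * the block's offset range is reserved for the maximum up front while
 * used_length starts at the initial size.
 */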

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        if (block->flags & RAM_FILE) {
            qemu_ram_munmap(block->host, block->max_length);
        } else {
            munmap(block->host, block->max_length);
        }
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    void *ptr;
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;
        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length) {
                    *size = block->max_length - addr + block->offset;
                }
                ptr = ramblock_ptr(block, addr - block->offset);
                rcu_read_unlock();
                return ptr;
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
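
/*
 * Hypothetical usage sketch: recovering the owning block and a page-aligned
 * offset from a host pointer that points into guest RAM:
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host, true, &ram_addr, &offset);
 *     if (rb) {
 *         printf("ptr is in %s at offset 0x" RAM_ADDR_FMT "\n",
 *                qemu_ram_get_idstr(rb), offset);
 *     }
 *
 * "host" is a placeholder for a pointer into a mapped RAMBlock; with
 * round_offset true the returned offset is rounded down to a target page
 * boundary.
 */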

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map  = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002353 .region_add = mem_add,
2354 .region_nop = mem_add,
2355 .priority = 0,
2356 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002357 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002358}
2359
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002360void address_space_unregister(AddressSpace *as)
2361{
2362 memory_listener_unregister(&as->dispatch_listener);
2363}
2364
Avi Kivity83f3c252012-10-07 12:59:55 +02002365void address_space_destroy_dispatch(AddressSpace *as)
2366{
2367 AddressSpaceDispatch *d = as->dispatch;
2368
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002369 atomic_rcu_set(&as->dispatch, NULL);
2370 if (d) {
2371 call_rcu(d, address_space_dispatch_free, rcu);
2372 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002373}
2374
Avi Kivity62152b82011-07-26 14:26:14 +03002375static void memory_map_init(void)
2376{
Anthony Liguori7267c092011-08-20 22:09:37 -05002377 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002378
Paolo Bonzini57271d62013-11-07 17:14:37 +01002379 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002380 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002381
Anthony Liguori7267c092011-08-20 22:09:37 -05002382 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002383 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2384 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002385 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002386}
2387
2388MemoryRegion *get_system_memory(void)
2389{
2390 return system_memory;
2391}
2392
Avi Kivity309cb472011-08-08 16:09:03 +03002393MemoryRegion *get_system_io(void)
2394{
2395 return system_io;
2396}
2397
pbrooke2eef172008-06-08 01:09:01 +00002398#endif /* !defined(CONFIG_USER_ONLY) */
2399
bellard13eb76e2004-01-24 15:23:36 +00002400/* physical memory access (slow version, mainly for debug) */
2401#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002402int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002403 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002404{
2405 int l, flags;
2406 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002407 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002408
2409 while (len > 0) {
2410 page = addr & TARGET_PAGE_MASK;
2411 l = (page + TARGET_PAGE_SIZE) - addr;
2412 if (l > len)
2413 l = len;
2414 flags = page_get_flags(page);
2415 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002416 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002417 if (is_write) {
2418 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002419 return -1;
bellard579a97f2007-11-11 14:26:47 +00002420 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002421 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002422 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002423 memcpy(p, buf, l);
2424 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002425 } else {
2426 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002427 return -1;
bellard579a97f2007-11-11 14:26:47 +00002428 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002429 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002430 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002431 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002432 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002433 }
2434 len -= l;
2435 buf += l;
2436 addr += l;
2437 }
Paul Brooka68fe892010-03-01 00:08:59 +00002438 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002439}
bellard8df1cd02005-01-28 22:37:22 +00002440
bellard13eb76e2004-01-24 15:23:36 +00002441#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002442
Paolo Bonzini845b6212015-03-23 11:45:53 +01002443static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002444 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002445{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002446 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2447 /* No early return if dirty_log_mask is or becomes 0, because
2448 * cpu_physical_memory_set_dirty_range will still call
2449 * xen_modified_memory.
2450 */
2451 if (dirty_log_mask) {
2452 dirty_log_mask =
2453 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002454 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002455 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2456 tb_invalidate_phys_range(addr, addr + length);
2457 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2458 }
2459 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002460}
2461
Richard Henderson23326162013-07-08 14:55:59 -07002462static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002463{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002464 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002465
2466 /* Regions are assumed to support 1-4 byte accesses unless
2467 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002468 if (access_size_max == 0) {
2469 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002470 }
Richard Henderson23326162013-07-08 14:55:59 -07002471
2472 /* Bound the maximum access by the alignment of the address. */
2473 if (!mr->ops->impl.unaligned) {
2474 unsigned align_size_max = addr & -addr;
2475 if (align_size_max != 0 && align_size_max < access_size_max) {
2476 access_size_max = align_size_max;
2477 }
2478 }
2479
2480 /* Don't attempt accesses larger than the maximum. */
2481 if (l > access_size_max) {
2482 l = access_size_max;
2483 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002484 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002485
2486 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002487}
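/* A worked example of the clamping above, assuming a region whose ops allow
 * only aligned accesses and do not override valid.max_access_size: for an
 * 8-byte request at address 0x1006, the default maximum is 4, addr & -addr
 * is 2, so the access is clamped to 2 bytes; pow2floor() then leaves l == 2
 * and the caller's loop issues the remaining bytes as further accesses.
 */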
2488
Jan Kiszka4840f102015-06-18 18:47:22 +02002489static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002490{
Jan Kiszka4840f102015-06-18 18:47:22 +02002491 bool unlocked = !qemu_mutex_iothread_locked();
2492 bool release_lock = false;
2493
2494 if (unlocked && mr->global_locking) {
2495 qemu_mutex_lock_iothread();
2496 unlocked = false;
2497 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002498 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002499 if (mr->flush_coalesced_mmio) {
2500 if (unlocked) {
2501 qemu_mutex_lock_iothread();
2502 }
2503 qemu_flush_coalesced_mmio_buffer();
2504 if (unlocked) {
2505 qemu_mutex_unlock_iothread();
2506 }
2507 }
2508
2509 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002510}
2511
Peter Maydell5c9eb022015-04-26 16:49:24 +01002512MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2513 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002514{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002515 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002516 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002517 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002518 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002519 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002520 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002521 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002522
Paolo Bonzini41063e12015-03-18 14:21:43 +01002523 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002524 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002525 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002526 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002527
bellard13eb76e2004-01-24 15:23:36 +00002528 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002529 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002530 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002531 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002532 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002533 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002534 switch (l) {
2535 case 8:
2536 /* 64 bit write access */
2537 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002538 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2539 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002540 break;
2541 case 4:
bellard1c213d12005-09-03 10:49:04 +00002542 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002543 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002544 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2545 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002546 break;
2547 case 2:
bellard1c213d12005-09-03 10:49:04 +00002548 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002549 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002550 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2551 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002552 break;
2553 case 1:
bellard1c213d12005-09-03 10:49:04 +00002554 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002555 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002556 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2557 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002558 break;
2559 default:
2560 abort();
bellard13eb76e2004-01-24 15:23:36 +00002561 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002562 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002563 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002564 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002565 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002566 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002567 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002568 }
2569 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002570 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002571 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002572 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002573 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002574 switch (l) {
2575 case 8:
2576 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002577 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2578 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002579 stq_p(buf, val);
2580 break;
2581 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002582 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002583 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2584 attrs);
bellardc27004e2005-01-03 23:35:10 +00002585 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002586 break;
2587 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002588 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002589 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2590 attrs);
bellardc27004e2005-01-03 23:35:10 +00002591 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002592 break;
2593 case 1:
bellard1c213d12005-09-03 10:49:04 +00002594 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002595 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2596 attrs);
bellardc27004e2005-01-03 23:35:10 +00002597 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002598 break;
2599 default:
2600 abort();
bellard13eb76e2004-01-24 15:23:36 +00002601 }
2602 } else {
2603 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002604 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002605 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002606 }
2607 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002608
2609 if (release_lock) {
2610 qemu_mutex_unlock_iothread();
2611 release_lock = false;
2612 }
2613
bellard13eb76e2004-01-24 15:23:36 +00002614 len -= l;
2615 buf += l;
2616 addr += l;
2617 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002618 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002619
Peter Maydell3b643492015-04-26 16:49:23 +01002620 return result;
bellard13eb76e2004-01-24 15:23:36 +00002621}
bellard8df1cd02005-01-28 22:37:22 +00002622
Peter Maydell5c9eb022015-04-26 16:49:24 +01002623MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2624 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002625{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002626 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002627}
2628
Peter Maydell5c9eb022015-04-26 16:49:24 +01002629MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2630 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002631{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002632 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002633}
2634
2635
Avi Kivitya8170e52012-10-23 12:30:10 +02002636void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002637 int len, int is_write)
2638{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002639 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2640 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002641}
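/* An illustrative sketch of how a caller might use the MemTxAttrs/MemTxResult
 * API above; the guest physical address and the buffer are hypothetical
 * placeholders, not taken from real device code:
 *
 *     uint8_t desc[16];
 *     MemTxResult res;
 *
 *     res = address_space_read(&address_space_memory, 0x40000000,
 *                              MEMTXATTRS_UNSPECIFIED, desc, sizeof(desc));
 *     if (res != MEMTX_OK) {
 *         ... report a DMA error to the guest ...
 *     }
 */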
2642
Alexander Graf582b55a2013-12-11 14:17:44 +01002643enum write_rom_type {
2644 WRITE_DATA,
2645 FLUSH_CACHE,
2646};
2647
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002648static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002649 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002650{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002651 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002652 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002653 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002654 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002655
Paolo Bonzini41063e12015-03-18 14:21:43 +01002656 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002657 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002658 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002659 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002660
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002661 if (!(memory_region_is_ram(mr) ||
2662 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002663 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002664 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002665 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002666 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002667 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002668 switch (type) {
2669 case WRITE_DATA:
2670 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002671 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002672 break;
2673 case FLUSH_CACHE:
2674 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2675 break;
2676 }
bellardd0ecd2a2006-04-23 17:14:48 +00002677 }
2678 len -= l;
2679 buf += l;
2680 addr += l;
2681 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002682 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002683}
2684
Alexander Graf582b55a2013-12-11 14:17:44 +01002685/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002686void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002687 const uint8_t *buf, int len)
2688{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002689 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002690}
2691
2692void cpu_flush_icache_range(hwaddr start, int len)
2693{
2694 /*
2695 * This function should do the same thing as an icache flush that was
2696 * triggered from within the guest. For TCG we are always cache coherent,
2697 * so there is no need to flush anything. For KVM / Xen we need to flush
2698 * the host's instruction cache at least.
2699 */
2700 if (tcg_enabled()) {
2701 return;
2702 }
2703
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002704 cpu_physical_memory_write_rom_internal(&address_space_memory,
2705 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002706}
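/* An illustrative sketch of how a loader might pair the two helpers above
 * when copying executable code into guest memory; kernel_gpa, kernel_buf and
 * kernel_size are hypothetical placeholders:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, kernel_gpa,
 *                                   kernel_buf, kernel_size);
 *     cpu_flush_icache_range(kernel_gpa, kernel_size);
 */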
2707
aliguori6d16c2f2009-01-22 16:59:11 +00002708typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002709 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002710 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002711 hwaddr addr;
2712 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002713 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002714} BounceBuffer;
2715
2716static BounceBuffer bounce;
2717
aliguoriba223c22009-01-22 16:59:16 +00002718typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002719 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002720 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002721} MapClient;
2722
Fam Zheng38e047b2015-03-16 17:03:35 +08002723QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002724static QLIST_HEAD(map_client_list, MapClient) map_client_list
2725 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002726
Fam Zhenge95205e2015-03-16 17:03:37 +08002727static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002728{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002729 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002730 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002731}
2732
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002733static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002734{
2735 MapClient *client;
2736
Blue Swirl72cf2d42009-09-12 07:36:22 +00002737 while (!QLIST_EMPTY(&map_client_list)) {
2738 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002739 qemu_bh_schedule(client->bh);
2740 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002741 }
2742}
2743
Fam Zhenge95205e2015-03-16 17:03:37 +08002744void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002745{
2746 MapClient *client = g_malloc(sizeof(*client));
2747
Fam Zheng38e047b2015-03-16 17:03:35 +08002748 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002749 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002750 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002751 if (!atomic_read(&bounce.in_use)) {
2752 cpu_notify_map_clients_locked();
2753 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002754 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002755}
2756
Fam Zheng38e047b2015-03-16 17:03:35 +08002757void cpu_exec_init_all(void)
2758{
2759 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002760 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002761 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002762 qemu_mutex_init(&map_client_list_lock);
2763}
2764
Fam Zhenge95205e2015-03-16 17:03:37 +08002765void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002766{
Fam Zhenge95205e2015-03-16 17:03:37 +08002767 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002768
Fam Zhenge95205e2015-03-16 17:03:37 +08002769 qemu_mutex_lock(&map_client_list_lock);
2770 QLIST_FOREACH(client, &map_client_list, link) {
2771 if (client->bh == bh) {
2772 cpu_unregister_map_client_do(client);
2773 break;
2774 }
2775 }
2776 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002777}
2778
2779static void cpu_notify_map_clients(void)
2780{
Fam Zheng38e047b2015-03-16 17:03:35 +08002781 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002782 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002783 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002784}
2785
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002786bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2787{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002788 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002789 hwaddr l, xlat;
2790
Paolo Bonzini41063e12015-03-18 14:21:43 +01002791 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002792 while (len > 0) {
2793 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002794 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2795 if (!memory_access_is_direct(mr, is_write)) {
2796 l = memory_access_size(mr, l, addr);
2797 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002798 return false;
2799 }
2800 }
2801
2802 len -= l;
2803 addr += l;
2804 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002805 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002806 return true;
2807}
2808
aliguori6d16c2f2009-01-22 16:59:11 +00002809/* Map a physical memory region into a host virtual address.
2810 * May map a subset of the requested range, given by and returned in *plen.
2811 * May return NULL if resources needed to perform the mapping are exhausted.
2812 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002813 * Use cpu_register_map_client() to know when retrying the map operation is
2814 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002815 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002816void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002817 hwaddr addr,
2818 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002819 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002820{
Avi Kivitya8170e52012-10-23 12:30:10 +02002821 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002822 hwaddr done = 0;
2823 hwaddr l, xlat, base;
2824 MemoryRegion *mr, *this_mr;
2825 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002826
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002827 if (len == 0) {
2828 return NULL;
2829 }
aliguori6d16c2f2009-01-22 16:59:11 +00002830
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002831 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002832 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002833 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002834
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002835 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002836 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002837 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002838 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002839 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002840 /* Avoid unbounded allocations */
2841 l = MIN(l, TARGET_PAGE_SIZE);
2842 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002843 bounce.addr = addr;
2844 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002845
2846 memory_region_ref(mr);
2847 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002848 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002849 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2850 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002851 }
aliguori6d16c2f2009-01-22 16:59:11 +00002852
Paolo Bonzini41063e12015-03-18 14:21:43 +01002853 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002854 *plen = l;
2855 return bounce.buffer;
2856 }
2857
2858 base = xlat;
2859 raddr = memory_region_get_ram_addr(mr);
2860
2861 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002862 len -= l;
2863 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002864 done += l;
2865 if (len == 0) {
2866 break;
2867 }
2868
2869 l = len;
2870 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2871 if (this_mr != mr || xlat != base + done) {
2872 break;
2873 }
aliguori6d16c2f2009-01-22 16:59:11 +00002874 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002875
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002876 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002877 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002878 *plen = done;
2879 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002880}
2881
Avi Kivityac1970f2012-10-03 16:22:53 +02002882/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002883 * Will also mark the memory as dirty if is_write == 1. access_len gives
2884 * the amount of memory that was actually read or written by the caller.
2885 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002886void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2887 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002888{
2889 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002890 MemoryRegion *mr;
2891 ram_addr_t addr1;
2892
2893 mr = qemu_ram_addr_from_host(buffer, &addr1);
2894 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002895 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002896 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002897 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002898 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002899 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002900 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002901 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002902 return;
2903 }
2904 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002905 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2906 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002907 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002908 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002909 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002910 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002911 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002912 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002913}
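/* An illustrative sketch of the map/unmap pattern implemented above,
 * including the retry path through cpu_register_map_client() when the single
 * bounce buffer is busy; retry_bh, do_transfer, gpa and size are hypothetical
 * placeholders:
 *
 *     hwaddr plen = size;
 *     void *host = address_space_map(as, gpa, &plen, true);
 *     if (!host) {
 *         cpu_register_map_client(retry_bh);    ...map again from the BH...
 *         return;
 *     }
 *     do_transfer(host, plen);                  ...plen may be < size...
 *     address_space_unmap(as, host, plen, true, plen);
 */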
bellardd0ecd2a2006-04-23 17:14:48 +00002914
Avi Kivitya8170e52012-10-23 12:30:10 +02002915void *cpu_physical_memory_map(hwaddr addr,
2916 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002917 int is_write)
2918{
2919 return address_space_map(&address_space_memory, addr, plen, is_write);
2920}
2921
Avi Kivitya8170e52012-10-23 12:30:10 +02002922void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2923 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002924{
2925 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2926}
2927
bellard8df1cd02005-01-28 22:37:22 +00002928/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002929static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2930 MemTxAttrs attrs,
2931 MemTxResult *result,
2932 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002933{
bellard8df1cd02005-01-28 22:37:22 +00002934 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002935 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002936 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002937 hwaddr l = 4;
2938 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002939 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002940 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002941
Paolo Bonzini41063e12015-03-18 14:21:43 +01002942 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002943 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002944 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002945 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002946
bellard8df1cd02005-01-28 22:37:22 +00002947 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002948 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002949#if defined(TARGET_WORDS_BIGENDIAN)
2950 if (endian == DEVICE_LITTLE_ENDIAN) {
2951 val = bswap32(val);
2952 }
2953#else
2954 if (endian == DEVICE_BIG_ENDIAN) {
2955 val = bswap32(val);
2956 }
2957#endif
bellard8df1cd02005-01-28 22:37:22 +00002958 } else {
2959 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002960 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002961 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002962 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002963 switch (endian) {
2964 case DEVICE_LITTLE_ENDIAN:
2965 val = ldl_le_p(ptr);
2966 break;
2967 case DEVICE_BIG_ENDIAN:
2968 val = ldl_be_p(ptr);
2969 break;
2970 default:
2971 val = ldl_p(ptr);
2972 break;
2973 }
Peter Maydell50013112015-04-26 16:49:24 +01002974 r = MEMTX_OK;
2975 }
2976 if (result) {
2977 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002978 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002979 if (release_lock) {
2980 qemu_mutex_unlock_iothread();
2981 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002982 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002983 return val;
2984}
2985
Peter Maydell50013112015-04-26 16:49:24 +01002986uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2987 MemTxAttrs attrs, MemTxResult *result)
2988{
2989 return address_space_ldl_internal(as, addr, attrs, result,
2990 DEVICE_NATIVE_ENDIAN);
2991}
2992
2993uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2994 MemTxAttrs attrs, MemTxResult *result)
2995{
2996 return address_space_ldl_internal(as, addr, attrs, result,
2997 DEVICE_LITTLE_ENDIAN);
2998}
2999
3000uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3001 MemTxAttrs attrs, MemTxResult *result)
3002{
3003 return address_space_ldl_internal(as, addr, attrs, result,
3004 DEVICE_BIG_ENDIAN);
3005}
3006
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003007uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003008{
Peter Maydell50013112015-04-26 16:49:24 +01003009 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003010}
3011
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003012uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003013{
Peter Maydell50013112015-04-26 16:49:24 +01003014 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003015}
3016
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003017uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003018{
Peter Maydell50013112015-04-26 16:49:24 +01003019 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003020}
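/* An illustrative sketch of reading one little-endian 32-bit field with the
 * accessors above while checking the transaction result; regs_gpa is a
 * hypothetical placeholder:
 *
 *     MemTxResult res;
 *     uint32_t status = address_space_ldl_le(as, regs_gpa + 4,
 *                                            MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ... treat the read as failed ...
 *     }
 */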
3021
bellard84b7b8e2005-11-28 21:19:04 +00003022/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003023static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3024 MemTxAttrs attrs,
3025 MemTxResult *result,
3026 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003027{
bellard84b7b8e2005-11-28 21:19:04 +00003028 uint8_t *ptr;
3029 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003030 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003031 hwaddr l = 8;
3032 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003033 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003034 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003035
Paolo Bonzini41063e12015-03-18 14:21:43 +01003036 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003037 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003038 false);
3039 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003040 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003041
bellard84b7b8e2005-11-28 21:19:04 +00003042 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003043 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003044#if defined(TARGET_WORDS_BIGENDIAN)
3045 if (endian == DEVICE_LITTLE_ENDIAN) {
3046 val = bswap64(val);
3047 }
3048#else
3049 if (endian == DEVICE_BIG_ENDIAN) {
3050 val = bswap64(val);
3051 }
3052#endif
bellard84b7b8e2005-11-28 21:19:04 +00003053 } else {
3054 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003055 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003056 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003057 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003058 switch (endian) {
3059 case DEVICE_LITTLE_ENDIAN:
3060 val = ldq_le_p(ptr);
3061 break;
3062 case DEVICE_BIG_ENDIAN:
3063 val = ldq_be_p(ptr);
3064 break;
3065 default:
3066 val = ldq_p(ptr);
3067 break;
3068 }
Peter Maydell50013112015-04-26 16:49:24 +01003069 r = MEMTX_OK;
3070 }
3071 if (result) {
3072 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003073 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003074 if (release_lock) {
3075 qemu_mutex_unlock_iothread();
3076 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003077 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003078 return val;
3079}
3080
Peter Maydell50013112015-04-26 16:49:24 +01003081uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3082 MemTxAttrs attrs, MemTxResult *result)
3083{
3084 return address_space_ldq_internal(as, addr, attrs, result,
3085 DEVICE_NATIVE_ENDIAN);
3086}
3087
3088uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3089 MemTxAttrs attrs, MemTxResult *result)
3090{
3091 return address_space_ldq_internal(as, addr, attrs, result,
3092 DEVICE_LITTLE_ENDIAN);
3093}
3094
3095uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3096 MemTxAttrs attrs, MemTxResult *result)
3097{
3098 return address_space_ldq_internal(as, addr, attrs, result,
3099 DEVICE_BIG_ENDIAN);
3100}
3101
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003102uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003103{
Peter Maydell50013112015-04-26 16:49:24 +01003104 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003105}
3106
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003107uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003108{
Peter Maydell50013112015-04-26 16:49:24 +01003109 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003110}
3111
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003112uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003113{
Peter Maydell50013112015-04-26 16:49:24 +01003114 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003115}
3116
bellardaab33092005-10-30 20:48:42 +00003117/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003118uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3119 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003120{
3121 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003122 MemTxResult r;
3123
3124 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3125 if (result) {
3126 *result = r;
3127 }
bellardaab33092005-10-30 20:48:42 +00003128 return val;
3129}
3130
Peter Maydell50013112015-04-26 16:49:24 +01003131uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3132{
3133 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3134}
3135
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003136/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003137static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3138 hwaddr addr,
3139 MemTxAttrs attrs,
3140 MemTxResult *result,
3141 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003142{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003143 uint8_t *ptr;
3144 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003145 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003146 hwaddr l = 2;
3147 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003148 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003149 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003150
Paolo Bonzini41063e12015-03-18 14:21:43 +01003151 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003152 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003153 false);
3154 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003155 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003156
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003157 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003158 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003159#if defined(TARGET_WORDS_BIGENDIAN)
3160 if (endian == DEVICE_LITTLE_ENDIAN) {
3161 val = bswap16(val);
3162 }
3163#else
3164 if (endian == DEVICE_BIG_ENDIAN) {
3165 val = bswap16(val);
3166 }
3167#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003168 } else {
3169 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003170 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003171 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003172 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003173 switch (endian) {
3174 case DEVICE_LITTLE_ENDIAN:
3175 val = lduw_le_p(ptr);
3176 break;
3177 case DEVICE_BIG_ENDIAN:
3178 val = lduw_be_p(ptr);
3179 break;
3180 default:
3181 val = lduw_p(ptr);
3182 break;
3183 }
Peter Maydell50013112015-04-26 16:49:24 +01003184 r = MEMTX_OK;
3185 }
3186 if (result) {
3187 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003188 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003189 if (release_lock) {
3190 qemu_mutex_unlock_iothread();
3191 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003192 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003193 return val;
bellardaab33092005-10-30 20:48:42 +00003194}
3195
Peter Maydell50013112015-04-26 16:49:24 +01003196uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3197 MemTxAttrs attrs, MemTxResult *result)
3198{
3199 return address_space_lduw_internal(as, addr, attrs, result,
3200 DEVICE_NATIVE_ENDIAN);
3201}
3202
3203uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3204 MemTxAttrs attrs, MemTxResult *result)
3205{
3206 return address_space_lduw_internal(as, addr, attrs, result,
3207 DEVICE_LITTLE_ENDIAN);
3208}
3209
3210uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3211 MemTxAttrs attrs, MemTxResult *result)
3212{
3213 return address_space_lduw_internal(as, addr, attrs, result,
3214 DEVICE_BIG_ENDIAN);
3215}
3216
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003217uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003218{
Peter Maydell50013112015-04-26 16:49:24 +01003219 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003220}
3221
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003222uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003223{
Peter Maydell50013112015-04-26 16:49:24 +01003224 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003225}
3226
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003227uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003228{
Peter Maydell50013112015-04-26 16:49:24 +01003229 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003230}
3231
bellard8df1cd02005-01-28 22:37:22 +00003232/* warning: addr must be aligned. The ram page is not marked as dirty
3233 and the code inside is not invalidated. It is useful if the dirty
3234 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003235void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3236 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003237{
bellard8df1cd02005-01-28 22:37:22 +00003238 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003239 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003240 hwaddr l = 4;
3241 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003242 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003243 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003244 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003245
Paolo Bonzini41063e12015-03-18 14:21:43 +01003246 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003247 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003248 true);
3249 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003250 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003251
Peter Maydell50013112015-04-26 16:49:24 +01003252 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003253 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003254 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003255 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003256 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003257
Paolo Bonzini845b6212015-03-23 11:45:53 +01003258 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3259 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003260 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003261 r = MEMTX_OK;
3262 }
3263 if (result) {
3264 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003265 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003266 if (release_lock) {
3267 qemu_mutex_unlock_iothread();
3268 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003269 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003270}
3271
Peter Maydell50013112015-04-26 16:49:24 +01003272void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3273{
3274 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3275}
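/* An illustrative sketch of the PTE-tracking use case described above: a
 * target's page table walker can set an accessed/dirty bit in a guest PTE
 * without dirtying the RAM page or invalidating translated code; pte_addr,
 * pte and PTE_ACCESSED are hypothetical placeholders for target-specific
 * values:
 *
 *     pte |= PTE_ACCESSED;
 *     stl_phys_notdirty(cpu->as, pte_addr, pte);
 */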
3276
bellard8df1cd02005-01-28 22:37:22 +00003277/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003278static inline void address_space_stl_internal(AddressSpace *as,
3279 hwaddr addr, uint32_t val,
3280 MemTxAttrs attrs,
3281 MemTxResult *result,
3282 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003283{
bellard8df1cd02005-01-28 22:37:22 +00003284 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003285 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003286 hwaddr l = 4;
3287 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003288 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003289 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003290
Paolo Bonzini41063e12015-03-18 14:21:43 +01003291 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003292 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003293 true);
3294 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003295 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003296
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003297#if defined(TARGET_WORDS_BIGENDIAN)
3298 if (endian == DEVICE_LITTLE_ENDIAN) {
3299 val = bswap32(val);
3300 }
3301#else
3302 if (endian == DEVICE_BIG_ENDIAN) {
3303 val = bswap32(val);
3304 }
3305#endif
Peter Maydell50013112015-04-26 16:49:24 +01003306 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003307 } else {
bellard8df1cd02005-01-28 22:37:22 +00003308 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003309 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003310 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003311 switch (endian) {
3312 case DEVICE_LITTLE_ENDIAN:
3313 stl_le_p(ptr, val);
3314 break;
3315 case DEVICE_BIG_ENDIAN:
3316 stl_be_p(ptr, val);
3317 break;
3318 default:
3319 stl_p(ptr, val);
3320 break;
3321 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003322 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003323 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003324 }
Peter Maydell50013112015-04-26 16:49:24 +01003325 if (result) {
3326 *result = r;
3327 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003328 if (release_lock) {
3329 qemu_mutex_unlock_iothread();
3330 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003331 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003332}
3333
3334void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3335 MemTxAttrs attrs, MemTxResult *result)
3336{
3337 address_space_stl_internal(as, addr, val, attrs, result,
3338 DEVICE_NATIVE_ENDIAN);
3339}
3340
3341void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3342 MemTxAttrs attrs, MemTxResult *result)
3343{
3344 address_space_stl_internal(as, addr, val, attrs, result,
3345 DEVICE_LITTLE_ENDIAN);
3346}
3347
3348void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3349 MemTxAttrs attrs, MemTxResult *result)
3350{
3351 address_space_stl_internal(as, addr, val, attrs, result,
3352 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003353}
3354
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003355void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003356{
Peter Maydell50013112015-04-26 16:49:24 +01003357 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003358}
3359
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003360void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003361{
Peter Maydell50013112015-04-26 16:49:24 +01003362 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003363}
3364
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003365void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003366{
Peter Maydell50013112015-04-26 16:49:24 +01003367 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003368}
3369
bellardaab33092005-10-30 20:48:42 +00003370/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003371void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3372 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003373{
3374 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003375 MemTxResult r;
3376
3377 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3378 if (result) {
3379 *result = r;
3380 }
3381}
3382
3383void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3384{
3385 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003386}
3387
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003388/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003389static inline void address_space_stw_internal(AddressSpace *as,
3390 hwaddr addr, uint32_t val,
3391 MemTxAttrs attrs,
3392 MemTxResult *result,
3393 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003394{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003395 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003396 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003397 hwaddr l = 2;
3398 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003399 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003400 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003401
Paolo Bonzini41063e12015-03-18 14:21:43 +01003402 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003403 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003404 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003405 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003406
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003407#if defined(TARGET_WORDS_BIGENDIAN)
3408 if (endian == DEVICE_LITTLE_ENDIAN) {
3409 val = bswap16(val);
3410 }
3411#else
3412 if (endian == DEVICE_BIG_ENDIAN) {
3413 val = bswap16(val);
3414 }
3415#endif
Peter Maydell50013112015-04-26 16:49:24 +01003416 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003417 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003418 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003419 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003420 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003421 switch (endian) {
3422 case DEVICE_LITTLE_ENDIAN:
3423 stw_le_p(ptr, val);
3424 break;
3425 case DEVICE_BIG_ENDIAN:
3426 stw_be_p(ptr, val);
3427 break;
3428 default:
3429 stw_p(ptr, val);
3430 break;
3431 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003432 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003433 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003434 }
Peter Maydell50013112015-04-26 16:49:24 +01003435 if (result) {
3436 *result = r;
3437 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003438 if (release_lock) {
3439 qemu_mutex_unlock_iothread();
3440 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003441 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003442}
3443
3444void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3445 MemTxAttrs attrs, MemTxResult *result)
3446{
3447 address_space_stw_internal(as, addr, val, attrs, result,
3448 DEVICE_NATIVE_ENDIAN);
3449}
3450
3451void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3452 MemTxAttrs attrs, MemTxResult *result)
3453{
3454 address_space_stw_internal(as, addr, val, attrs, result,
3455 DEVICE_LITTLE_ENDIAN);
3456}
3457
3458void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3459 MemTxAttrs attrs, MemTxResult *result)
3460{
3461 address_space_stw_internal(as, addr, val, attrs, result,
3462 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003463}
3464
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003465void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003466{
Peter Maydell50013112015-04-26 16:49:24 +01003467 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003468}
3469
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003470void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003471{
Peter Maydell50013112015-04-26 16:49:24 +01003472 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003473}
3474
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003475void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003476{
Peter Maydell50013112015-04-26 16:49:24 +01003477 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003478}
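/*
 * Illustrative sketch, not part of the original file: callers that care
 * about transaction attributes or failures use the address_space_stw*()
 * variants above rather than the stw*_phys() wrappers.  The helper name and
 * the use of MEMTXATTRS_UNSPECIFIED here are only an example.
 */
#if 0
static bool example_stw_checked(AddressSpace *as, hwaddr addr, uint16_t val)
{
    MemTxResult res;

    /* Little-endian 16-bit store; res reports MEMTX_OK on success. */
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
#endif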
3479
bellardaab33092005-10-30 20:48:42 +00003480/* XXX: optimize -- unlike the stw helpers above, these 64-bit stores
 * byte-swap into a temporary and bounce through address_space_rw() rather
 * than dispatching to the memory region directly.
 */
Peter Maydell50013112015-04-26 16:49:24 +01003481void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3482 MemTxAttrs attrs, MemTxResult *result)
3483{
3484 MemTxResult r;
3485 val = tswap64(val);
3486 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3487 if (result) {
3488 *result = r;
3489 }
3490}
3491
3492void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3493 MemTxAttrs attrs, MemTxResult *result)
3494{
3495 MemTxResult r;
3496 val = cpu_to_le64(val);
3497 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3498 if (result) {
3499 *result = r;
3500 }
3501}

 3502void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3503 MemTxAttrs attrs, MemTxResult *result)
3504{
3505 MemTxResult r;
3506 val = cpu_to_be64(val);
3507 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3508 if (result) {
3509 *result = r;
3510 }
3511}
3512
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003513void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003514{
Peter Maydell50013112015-04-26 16:49:24 +01003515 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003516}
3517
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003518void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003519{
Peter Maydell50013112015-04-26 16:49:24 +01003520 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003521}
3522
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003523void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003524{
Peter Maydell50013112015-04-26 16:49:24 +01003525 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003526}
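/*
 * Illustrative sketch, not part of the original file: the st*_phys()
 * wrappers above are the convenience form for callers that ignore
 * MemTxAttrs and MemTxResult.  The function name, addresses and values
 * below are made-up examples.
 */
#if 0
static void example_store_ring_entry(AddressSpace *as, hwaddr base)
{
    /* 16-bit flags word followed by a 64-bit buffer address, little-endian */
    stw_le_phys(as, base, 0x0001);
    stq_le_phys(as, base + 8, 0x123456789abcULL);
}
#endif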
3527
aliguori5e2972f2009-03-28 17:51:36 +00003528/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003529int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003530 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003531{
3532 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003533 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003534 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003535
3536 while (len > 0) {
3537 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003538 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003539 /* if no physical page mapped, return an error */
 3540 if (phys_addr == -1) {
 3541 return -1;
 }
 3542 l = (page + TARGET_PAGE_SIZE) - addr;
 3543 if (l > len) {
 3544 l = len;
 }
aliguori5e2972f2009-03-28 17:51:36 +00003545 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003546 if (is_write) {
3547 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3548 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003549 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3550 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003551 }
bellard13eb76e2004-01-24 15:23:36 +00003552 len -= l;
3553 buf += l;
3554 addr += l;
3555 }
3556 return 0;
3557}
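/*
 * Illustrative sketch, not part of the original file: a gdbstub-style user
 * of cpu_memory_rw_debug() reading a 32-bit value from a guest-virtual
 * address.  The helper name is made up.
 */
#if 0
static bool example_read_guest_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *out)
{
    /* is_write == 0 selects a read; the call fails if a page is unmapped. */
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out,
                               sizeof(*out), 0) == 0;
}
#endif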
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003558
3559/*
 3560 * Allows code that needs to deal with migration bitmaps etc. to still
 3561 * be built target-independent.
3562 */
3563size_t qemu_target_page_bits(void)
3564{
3565 return TARGET_PAGE_BITS;
3566}
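/*
 * Illustrative sketch, not part of the original file: target-independent
 * migration code can round a byte count up to whole target pages using
 * only the exported bit width.  The helper name is made up.
 */
#if 0
static uint64_t example_bytes_to_target_pages(uint64_t bytes)
{
    size_t bits = qemu_target_page_bits();

    return (bytes + (1ULL << bits) - 1) >> bits;
}
#endif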
3567
Paul Brooka68fe892010-03-01 00:08:59 +00003568#endif
bellard13eb76e2004-01-24 15:23:36 +00003569
Blue Swirl8e4a4242013-01-06 18:30:17 +00003570/*
3571 * A helper function for the _utterly broken_ virtio device model to find out if
3572 * it's running on a big endian machine. Don't do this at home kids!
3573 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003574bool target_words_bigendian(void);
3575bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003576{
3577#if defined(TARGET_WORDS_BIGENDIAN)
3578 return true;
3579#else
3580 return false;
3581#endif
3582}
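/*
 * Illustrative sketch, not part of the original file: legacy virtio code
 * uses target_words_bigendian() to pick a default device endianness.  The
 * helper name is only an example.
 */
#if 0
static bool example_virtio_default_is_big_endian(void)
{
    return target_words_bigendian();
}
#endif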
3583
Wen Congyang76f35532012-05-07 12:04:18 +08003584#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003585bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003586{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003587 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003588 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003589 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003590
Paolo Bonzini41063e12015-03-18 14:21:43 +01003591 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003592 mr = address_space_translate(&address_space_memory,
3593 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003594
Paolo Bonzini41063e12015-03-18 14:21:43 +01003595 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3596 rcu_read_unlock();
3597 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003598}
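/*
 * Illustrative sketch, not part of the original file: callers such as guest
 * memory dump code can use cpu_physical_memory_is_io() to skip addresses
 * backed by MMIO rather than RAM or ROM.  The helper name and the single
 * byte write are made-up examples.
 */
#if 0
static void example_poke_ram_only(hwaddr addr, uint8_t val)
{
    if (!cpu_physical_memory_is_io(addr)) {
        address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                         &val, 1, true);
    }
}
#endif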
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003599
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003600int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003601{
3602 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003603 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003604
Mike Day0dc3f442013-09-05 14:41:35 -04003605 rcu_read_lock();
3606 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003607 ret = func(block->idstr, block->host, block->offset,
3608 block->used_length, opaque);
3609 if (ret) {
3610 break;
3611 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003612 }
Mike Day0dc3f442013-09-05 14:41:35 -04003613 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003614 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003615}
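/*
 * Illustrative sketch, not part of the original file: a RAMBlockIterFunc
 * that sums the used length of every block, e.g. for migration accounting.
 * The function names are made up.
 */
#if 0
static int example_sum_block_lengths(const char *block_name, void *host_addr,
                                     ram_addr_t offset, ram_addr_t length,
                                     void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0; /* zero means keep iterating */
}

static uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block_lengths, &total);
    return total;
}
#endif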
Peter Maydellec3f8c92013-06-27 20:53:38 +01003616#endif