/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file.
 */
#define RAM_FILE (1 << 3)
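
/* Illustrative note: these values are bit positions, so a block's flags
 * field holds their bitwise OR; e.g. a shared, file-backed mapping would
 * carry (RAM_SHARED | RAM_FILE).
 */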
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
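
/* Sanity check, for the reader: ((uint32_t)~0) >> 6 == 0x03ffffff, i.e.
 * the nil marker is the largest value representable in the 26-bit ptr
 * field above.
 */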

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
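
/* Worked example: assuming a 4 KiB target page (TARGET_PAGE_BITS == 12),
 * P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6, and six 9-bit levels
 * (54 bits) are enough to index the 52 bits of page number.
 */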

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
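
/* Illustrative call (not part of the build; "d" and "section_index" are
 * placeholders): registering a 2 MiB region starting at guest physical
 * 0x100000, again assuming 4 KiB pages, stores the same leaf into 512
 * consecutive page slots:
 *
 *     phys_page_set(d, 0x100000 >> TARGET_PAGE_BITS, 512, section_index);
 */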

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
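
/* Effect in practice: a chain of single-child intermediate nodes collapses
 * into its parent entry, whose skip accumulates the skips of the nodes it
 * bypasses, so a later lookup walk jumps over them in one step.
 */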

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
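
/* Walk sketch: each iteration consumes lp.skip levels at once.  In an
 * uncompacted tree (skip == 1 everywhere) the loop peels P_L2_BITS bits
 * of the page index per step, picking p[(index >> (i * 9)) & 511], until
 * it reaches a leaf (skip == 0) or a nil pointer.
 */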

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
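
/* Usage sketch (illustrative only; "guest_paddr" is a placeholder): callers
 * run inside an RCU critical section and must respect the possibly
 * shortened length on return:
 *
 *     hwaddr xlat, len = 4;
 *     rcu_read_lock();
 *     MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                                guest_paddr, &xlat, &len,
 *                                                false);
 *     ... access at most "len" bytes at offset "xlat" within "mr" ...
 *     rcu_read_unlock();
 */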

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
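
/* Worked example: on a 64-bit vaddr, a watchpoint at 0xfffffffffffffffc
 * with len == 4 gives wpend == 0xffffffffffffffff; the naive end
 * "vaddr + len" would wrap to 0, but the inclusive-end comparison above
 * still matches an access to the last byte of the address space.
 */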

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
Igor Mammedova2b257d2014-10-31 16:38:37 +00001041void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +02001042{
1043 phys_mem_alloc = alloc;
1044}
1045
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001046static uint16_t phys_section_add(PhysPageMap *map,
1047 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001048{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001049 /* The physical section number is ORed with a page-aligned
1050 * pointer to produce the iotlb entries. Thus it should
1051 * never overflow into the page-aligned value.
1052 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001053 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001054
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001055 if (map->sections_nb == map->sections_nb_alloc) {
1056 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1057 map->sections = g_renew(MemoryRegionSection, map->sections,
1058 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001059 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001060 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001061 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001062 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001063}
1064
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001065static void phys_section_destroy(MemoryRegion *mr)
1066{
Don Slutz55b4e802015-11-30 17:11:04 -05001067 bool have_sub_page = mr->subpage;
1068
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001069 memory_region_unref(mr);
1070
Don Slutz55b4e802015-11-30 17:11:04 -05001071 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001072 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001073 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001074 g_free(subpage);
1075 }
1076}
1077
Paolo Bonzini60926662013-05-29 12:30:26 +02001078static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001079{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001080 while (map->sections_nb > 0) {
1081 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001082 phys_section_destroy(section->mr);
1083 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001084 g_free(map->sections);
1085 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001086}
1087
Avi Kivityac1970f2012-10-03 16:22:53 +02001088static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001089{
1090 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001091 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001092 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001093 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001094 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001095 MemoryRegionSection subsection = {
1096 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001097 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001098 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001099 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001100
Avi Kivityf3705d52012-03-08 16:16:34 +02001101 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001102
Avi Kivityf3705d52012-03-08 16:16:34 +02001103 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001104 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001105 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001106 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001107 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001108 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001109 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001110 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001111 }
1112 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001113 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001114 subpage_register(subpage, start, end,
1115 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001116}
1117
1118
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001119static void register_multipage(AddressSpaceDispatch *d,
1120 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001121{
Avi Kivitya8170e52012-10-23 12:30:10 +02001122 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001123 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001124 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1125 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001126
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001127 assert(num_pages);
1128 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001129}
1130
Avi Kivityac1970f2012-10-03 16:22:53 +02001131static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001132{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001133 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001134 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001135 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001136 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001137
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001138 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1139 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1140 - now.offset_within_address_space;
1141
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001142 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001143 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001144 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001145 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001146 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001147 while (int128_ne(remain.size, now.size)) {
1148 remain.size = int128_sub(remain.size, now.size);
1149 remain.offset_within_address_space += int128_get64(now.size);
1150 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001151 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001152 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001153 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001154 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001155 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001156 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001157 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001158 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001159 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001160 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001161 }
1162}
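/*
 * Worked example (illustrative, not in the original file; assumes
 * 4 KiB target pages): mem_add() registers a section at offset
 * 0x1800 with size 0x3000 in three steps:
 *
 *   [0x1800, 0x2000)  unaligned head -> register_subpage()
 *   [0x2000, 0x4000)  whole pages    -> register_multipage()
 *   [0x4000, 0x4800)  unaligned tail -> register_subpage()
 */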
1163
Sheng Yang62a27442010-01-26 19:21:16 +08001164void qemu_flush_coalesced_mmio_buffer(void)
1165{
1166 if (kvm_enabled())
1167 kvm_flush_coalesced_mmio_buffer();
1168}
1169
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001170void qemu_mutex_lock_ramlist(void)
1171{
1172 qemu_mutex_lock(&ram_list.mutex);
1173}
1174
1175void qemu_mutex_unlock_ramlist(void)
1176{
1177 qemu_mutex_unlock(&ram_list.mutex);
1178}
1179
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001180#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001181
1182#include <sys/vfs.h>
1183
1184#define HUGETLBFS_MAGIC 0x958458f6
1185
Hu Taofc7a5802014-09-09 13:28:01 +08001186static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001187{
1188 struct statfs fs;
1189 int ret;
1190
1191 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001192 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001193 } while (ret != 0 && errno == EINTR);
1194
1195 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001196 error_setg_errno(errp, errno, "failed to get page size of file %s",
1197 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001198 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199 }
1200
Marcelo Tosattic9027602010-03-01 20:25:08 -03001201 return fs.f_bsize;
1202}
1203
Alex Williamson04b16652010-07-02 11:13:17 -06001204static void *file_ram_alloc(RAMBlock *block,
1205 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001206 const char *path,
1207 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001208{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001209 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001210 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001211 char *sanitized_name;
1212 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001213 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001214 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001215 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001216 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001217
Hu Taofc7a5802014-09-09 13:28:01 +08001218 hpagesize = gethugepagesize(path, &local_err);
1219 if (local_err) {
1220 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001221 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001222 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001223 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001224
1225 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001226 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1227 "or larger than huge page size 0x%" PRIx64,
1228 memory, hpagesize);
1229 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001230 }
1231
1232 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001233 error_setg(errp,
1234 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001235 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236 }
1237
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001238 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1239 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1240 sanitized_name = g_strdup(memory_region_name(block->mr));
1241 for (c = sanitized_name; *c != '\0'; c++) {
1242 if (*c == '/') {
1243 *c = '_';
1244 }
1245 }
1246
1247 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1248 sanitized_name);
1249 g_free(sanitized_name);
1250
1251 fd = mkstemp(filename);
1252 if (fd >= 0) {
1253 unlink(filename);
1254 }
1255 g_free(filename);
1256 } else {
1257 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001258 }
1259
Marcelo Tosattic9027602010-03-01 20:25:08 -03001260 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001261 error_setg_errno(errp, errno,
1262 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001263 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001264 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001265
Chen Hanxiao9284f312015-07-24 11:12:03 +08001266 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267
1268 /*
1269 * ftruncate is not supported by hugetlbfs in older
1270 * hosts, so don't bother bailing out on errors.
1271 * If anything goes wrong with it under other filesystems,
1272 * mmap will fail.
1273 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001274 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001275 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001276 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001277
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001278 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001279 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001280 error_setg_errno(errp, errno,
1281 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001282 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001283 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001284 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001285
1286 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001287 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001288 }
1289
Alex Williamson04b16652010-07-02 11:13:17 -06001290 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001291 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001292
1293error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001294 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001295}
1296#endif
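/*
 * Usage note (illustrative): this code path backs guest RAM with a
 * file, typically on a hugetlbfs mount, e.g.
 *
 *   qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages
 *
 * The mount point is only an example; gethugepagesize() reports the
 * filesystem's block size, so other filesystems can work too,
 * subject to the checks above.
 */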
1297
Mike Day0dc3f442013-09-05 14:41:35 -04001298/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001299static ram_addr_t find_ram_offset(ram_addr_t size)
1300{
Alex Williamson04b16652010-07-02 11:13:17 -06001301 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001302 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001303
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001304 assert(size != 0); /* it would hand out the same offset multiple times */
1305
Mike Day0dc3f442013-09-05 14:41:35 -04001306 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001307 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001308 }
Alex Williamson04b16652010-07-02 11:13:17 -06001309
Mike Day0dc3f442013-09-05 14:41:35 -04001310 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001311 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001312
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001313 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001314
Mike Day0dc3f442013-09-05 14:41:35 -04001315 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001316 if (next_block->offset >= end) {
1317 next = MIN(next, next_block->offset);
1318 }
1319 }
1320 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001321 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001322 mingap = next - end;
1323 }
1324 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001325
1326 if (offset == RAM_ADDR_MAX) {
1327 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1328 (uint64_t)size);
1329 abort();
1330 }
1331
Alex Williamson04b16652010-07-02 11:13:17 -06001332 return offset;
1333}
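/*
 * Worked example (illustrative, not in the original file): with
 * blocks at [0, 4M) and [8M, 12M), find_ram_offset(2M) sees two
 * candidate gaps: the 4M hole starting at offset 4M and the
 * unbounded space above 12M.  Both fit, but the hole is the smaller
 * gap, so 4M is returned; best fit keeps the offset space compact.
 */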
1334
Juan Quintela652d7ec2012-07-20 10:37:54 +02001335ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001336{
Alex Williamsond17b5282010-06-25 11:08:38 -06001337 RAMBlock *block;
1338 ram_addr_t last = 0;
1339
Mike Day0dc3f442013-09-05 14:41:35 -04001340 rcu_read_lock();
1341 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001342 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001343 }
Mike Day0dc3f442013-09-05 14:41:35 -04001344 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001345 return last;
1346}
1347
Jason Baronddb97f12012-08-02 15:44:16 -04001348static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1349{
1350 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001351
1352 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001353 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001354 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1355 if (ret) {
1356 perror("qemu_madvise");
1357 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1358 "but dump_guest_core=off specified\n");
1359 }
1360 }
1361}
1362
Mike Day0dc3f442013-09-05 14:41:35 -04001363/* Called within an RCU critical section, or while the ramlist lock
1364 * is held.
1365 */
Hu Tao20cfe882014-04-02 15:13:26 +08001366static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001367{
Hu Tao20cfe882014-04-02 15:13:26 +08001368 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001369
Mike Day0dc3f442013-09-05 14:41:35 -04001370 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001371 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001372 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001373 }
1374 }
Hu Tao20cfe882014-04-02 15:13:26 +08001375
1376 return NULL;
1377}
1378
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001379const char *qemu_ram_get_idstr(RAMBlock *rb)
1380{
1381 return rb->idstr;
1382}
1383
Mike Dayae3a7042013-09-05 14:41:35 -04001384/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001385void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1386{
Mike Dayae3a7042013-09-05 14:41:35 -04001387 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001388
Mike Day0dc3f442013-09-05 14:41:35 -04001389 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001390 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001391 assert(new_block);
1392 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001393
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001394 if (dev) {
1395 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001396 if (id) {
1397 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001398 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001399 }
1400 }
1401 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1402
Mike Day0dc3f442013-09-05 14:41:35 -04001403 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001404 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001405 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1406 new_block->idstr);
1407 abort();
1408 }
1409 }
Mike Day0dc3f442013-09-05 14:41:35 -04001410 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001411}
1412
Mike Dayae3a7042013-09-05 14:41:35 -04001413/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001414void qemu_ram_unset_idstr(ram_addr_t addr)
1415{
Mike Dayae3a7042013-09-05 14:41:35 -04001416 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001417
Mike Dayae3a7042013-09-05 14:41:35 -04001418 /* FIXME: arch_init.c assumes that this is not called throughout
1419 * migration. Ignore the problem since hot-unplug during migration
1420 * does not work anyway.
1421 */
1422
Mike Day0dc3f442013-09-05 14:41:35 -04001423 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001424 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001425 if (block) {
1426 memset(block->idstr, 0, sizeof(block->idstr));
1427 }
Mike Day0dc3f442013-09-05 14:41:35 -04001428 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001429}
1430
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001431static int memory_try_enable_merging(void *addr, size_t len)
1432{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001433 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001434 /* disabled by the user */
1435 return 0;
1436 }
1437
1438 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1439}
1440
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001441/* Only legal before the guest might have detected the memory size: e.g. on
1442 * incoming migration, or right after reset.
1443 *
1444 * As the memory core doesn't know how memory is accessed, it is up to the
1445 * resize callback to update device state and/or add assertions to detect
1446 * misuse, if necessary.
1447 */
1448int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1449{
1450 RAMBlock *block = find_ram_block(base);
1451
1452 assert(block);
1453
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001454 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001455
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001456 if (block->used_length == newsize) {
1457 return 0;
1458 }
1459
1460 if (!(block->flags & RAM_RESIZEABLE)) {
1461 error_setg_errno(errp, EINVAL,
1462 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1463 " in != 0x" RAM_ADDR_FMT, block->idstr,
1464 newsize, block->used_length);
1465 return -EINVAL;
1466 }
1467
1468 if (block->max_length < newsize) {
1469 error_setg_errno(errp, EINVAL,
1470 "Length too large: %s: 0x" RAM_ADDR_FMT
1471 " > 0x" RAM_ADDR_FMT, block->idstr,
1472 newsize, block->max_length);
1473 return -EINVAL;
1474 }
1475
1476 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1477 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001478 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1479 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001480 memory_region_set_size(block->mr, newsize);
1481 if (block->resized) {
1482 block->resized(block->idstr, newsize, block->host);
1483 }
1484 return 0;
1485}
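/* Hypothetical caller-side sketch (not in QEMU): growing a
 * RAM_RESIZEABLE block, e.g. when an incoming migration stream
 * announces a larger used_length.  "block_base" and "new_size" are
 * placeholders for values the caller tracks.
 */
static inline int example_grow_block(ram_addr_t block_base,
                                     ram_addr_t new_size, Error **errp)
{
    /* Fails with -EINVAL if the block lacks RAM_RESIZEABLE or if
     * new_size exceeds the max_length fixed at allocation time. */
    return qemu_ram_resize(block_base, new_size, errp);
}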
1486
Hu Taoef701d72014-09-09 13:27:54 +08001487static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001488{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001489 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001490 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001491 ram_addr_t old_ram_size, new_ram_size;
1492
1493 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001494
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001495 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001496 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001497
1498 if (!new_block->host) {
1499 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001500 xen_ram_alloc(new_block->offset, new_block->max_length,
1501 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001502 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001503 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001504 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001505 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001506 error_setg_errno(errp, errno,
1507 "cannot set up guest memory '%s'",
1508 memory_region_name(new_block->mr));
1509 qemu_mutex_unlock_ramlist();
1510 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001511 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001512 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001513 }
1514 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001515
Li Zhijiandd631692015-07-02 20:18:06 +08001516 new_ram_size = MAX(old_ram_size,
1517 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1518 if (new_ram_size > old_ram_size) {
1519 migration_bitmap_extend(old_ram_size, new_ram_size);
1520 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001521 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1522 * QLIST (which has an RCU-friendly variant) does not have insertion at
1523 * tail, so save the last element in last_block.
1524 */
Mike Day0dc3f442013-09-05 14:41:35 -04001525 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001526 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001527 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001528 break;
1529 }
1530 }
1531 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001532 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001533 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001534 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001535 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001536 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001537 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001538 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001539
Mike Day0dc3f442013-09-05 14:41:35 -04001540 /* Write list before version */
1541 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001542 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001543 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001544
Juan Quintela2152f5c2013-10-08 13:52:02 +02001545 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1546
1547 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001548 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001549
1550 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001551 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1552 ram_list.dirty_memory[i] =
1553 bitmap_zero_extend(ram_list.dirty_memory[i],
1554 old_ram_size, new_ram_size);
1555 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001556 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001557 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001558 new_block->used_length,
1559 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001560
Paolo Bonzinia904c912015-01-21 16:18:35 +01001561 if (new_block->host) {
1562 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1563 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1564 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1565 if (kvm_enabled()) {
1566 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1567 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001568 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001569
1570 return new_block->offset;
1571}
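/* Design note (summary, not in the original file): keeping the list
 * sorted largest-first means the linear scans in lookup paths such as
 * qemu_get_ram_block() hit the big, frequently accessed blocks early;
 * ram_list.mru_block additionally caches the most recent match.
 */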
1572
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001573#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001574ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001575 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001576 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577{
1578 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001579 ram_addr_t addr;
1580 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581
1582 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001583 error_setg(errp, "-mem-path not supported with Xen");
1584 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001585 }
1586
1587 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1588 /*
1589 * file_ram_alloc() needs to allocate just like
1590 * phys_mem_alloc, but we haven't bothered to provide
1591 * a hook there.
1592 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001593 error_setg(errp,
1594 "-mem-path not supported with this accelerator");
1595 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001596 }
1597
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001598 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001599 new_block = g_malloc0(sizeof(*new_block));
1600 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001601 new_block->used_length = size;
1602 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001603 new_block->flags = share ? RAM_SHARED : 0;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001604 new_block->flags |= RAM_FILE;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001605 new_block->host = file_ram_alloc(new_block, size,
1606 mem_path, errp);
1607 if (!new_block->host) {
1608 g_free(new_block);
1609 return -1;
1610 }
1611
Hu Taoef701d72014-09-09 13:27:54 +08001612 addr = ram_block_add(new_block, &local_err);
1613 if (local_err) {
1614 g_free(new_block);
1615 error_propagate(errp, local_err);
1616 return -1;
1617 }
1618 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001619}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001620#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001621
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001622static
1623ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1624 void (*resized)(const char*,
1625 uint64_t length,
1626 void *host),
1627 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001628 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001629{
1630 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001631 ram_addr_t addr;
1632 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001633
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001634 size = HOST_PAGE_ALIGN(size);
1635 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636 new_block = g_malloc0(sizeof(*new_block));
1637 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001638 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001639 new_block->used_length = size;
1640 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001641 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001642 new_block->fd = -1;
1643 new_block->host = host;
1644 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001645 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001646 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001647 if (resizeable) {
1648 new_block->flags |= RAM_RESIZEABLE;
1649 }
Hu Taoef701d72014-09-09 13:27:54 +08001650 addr = ram_block_add(new_block, &local_err);
1651 if (local_err) {
1652 g_free(new_block);
1653 error_propagate(errp, local_err);
1654 return -1;
1655 }
1656 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001657}
1658
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001659ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1660 MemoryRegion *mr, Error **errp)
1661{
1662 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1663}
1664
Hu Taoef701d72014-09-09 13:27:54 +08001665ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001666{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001667 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1668}
1669
1670ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1671 void (*resized)(const char*,
1672 uint64_t length,
1673 void *host),
1674 MemoryRegion *mr, Error **errp)
1675{
1676 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001677}
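/* Hypothetical usage sketch (not in QEMU): allocating a resizeable
 * block.  "my_resized" is a placeholder callback; the memory core
 * invokes it from qemu_ram_resize() once used_length has changed.
 */
static void my_resized(const char *id, uint64_t length, void *host)
{
    /* e.g. update device state that mirrors the RAM size */
}

static ram_addr_t example_alloc(MemoryRegion *mr, Error **errp)
{
    /* start with 16 MiB in use, allow growth up to 64 MiB */
    return qemu_ram_alloc_resizeable(16 * 1024 * 1024, 64 * 1024 * 1024,
                                     my_resized, mr, errp);
}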
bellarde9a1ab12007-02-08 23:08:38 +00001678
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001679void qemu_ram_free_from_ptr(ram_addr_t addr)
1680{
1681 RAMBlock *block;
1682
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001683 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001684 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001685 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001686 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001687 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001688 /* Write list before version */
1689 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001690 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001691 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001692 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001693 }
1694 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001695 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001696}
1697
Paolo Bonzini43771532013-09-09 17:58:40 +02001698static void reclaim_ramblock(RAMBlock *block)
1699{
1700 if (block->flags & RAM_PREALLOC) {
1701 ;
1702 } else if (xen_enabled()) {
1703 xen_invalidate_map_cache_entry(block->host);
1704#ifndef _WIN32
1705 } else if (block->fd >= 0) {
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001706 if (block->flags & RAM_FILE) {
1707 qemu_ram_munmap(block->host, block->max_length);
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001708 } else {
1709 munmap(block->host, block->max_length);
1710 }
Paolo Bonzini43771532013-09-09 17:58:40 +02001711 close(block->fd);
1712#endif
1713 } else {
1714 qemu_anon_ram_free(block->host, block->max_length);
1715 }
1716 g_free(block);
1717}
1718
Anthony Liguoric227f092009-10-01 16:12:16 -05001719void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001720{
Alex Williamson04b16652010-07-02 11:13:17 -06001721 RAMBlock *block;
1722
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001723 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001724 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001725 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001726 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001727 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001728 /* Write list before version */
1729 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001730 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001731 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001732 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001733 }
1734 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001735 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001736}
1737
Huang Yingcd19cfa2011-03-02 08:56:19 +01001738#ifndef _WIN32
1739void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1740{
1741 RAMBlock *block;
1742 ram_addr_t offset;
1743 int flags;
1744 void *area, *vaddr;
1745
Mike Day0dc3f442013-09-05 14:41:35 -04001746 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001747 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001748 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001749 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001750 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001751 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001752 } else if (xen_enabled()) {
1753 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001754 } else {
1755 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001756 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001757 flags |= (block->flags & RAM_SHARED ?
1758 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001759 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1760 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001761 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001762 /*
1763 * Remap needs to match alloc. Accelerators that
1764 * set phys_mem_alloc never remap. If they did,
1765 * we'd need a remap hook here.
1766 */
1767 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1768
Huang Yingcd19cfa2011-03-02 08:56:19 +01001769 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1770 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1771 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001772 }
1773 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001774 fprintf(stderr, "Could not remap addr: "
1775 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001776 length, addr);
1777 exit(1);
1778 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001779 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001780 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001781 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001782 }
1783 }
1784}
1785#endif /* !_WIN32 */
1786
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001787int qemu_get_ram_fd(ram_addr_t addr)
1788{
Mike Dayae3a7042013-09-05 14:41:35 -04001789 RAMBlock *block;
1790 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001791
Mike Day0dc3f442013-09-05 14:41:35 -04001792 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001793 block = qemu_get_ram_block(addr);
1794 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001795 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001796 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001797}
1798
Damjan Marion3fd74b82014-06-26 23:01:32 +02001799void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1800{
Mike Dayae3a7042013-09-05 14:41:35 -04001801 RAMBlock *block;
1802 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001803
Mike Day0dc3f442013-09-05 14:41:35 -04001804 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001805 block = qemu_get_ram_block(addr);
1806 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001807 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001808 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001809}
1810
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001811/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001812 * This should not be used for general purpose DMA. Use address_space_map
1813 * or address_space_rw instead. For local memory (e.g. video ram) that the
1814 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001815 *
1816 * By the time this function returns, the returned pointer is not protected
1817 * by RCU anymore. If the caller is not within an RCU critical section and
1818 * does not hold the iothread lock, it must have other means of protecting the
1819 * pointer, such as a reference to the region that includes the incoming
1820 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001821 */
1822void *qemu_get_ram_ptr(ram_addr_t addr)
1823{
Mike Dayae3a7042013-09-05 14:41:35 -04001824 RAMBlock *block;
1825 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001826
Mike Day0dc3f442013-09-05 14:41:35 -04001827 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001828 block = qemu_get_ram_block(addr);
1829
1830 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001831 /* We need to check if the requested address is in the RAM
1832 * because we don't want to map the entire memory in QEMU.
1833 * In that case just map until the end of the page.
1834 */
1835 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001836 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001837 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001838 }
Mike Dayae3a7042013-09-05 14:41:35 -04001839
1840 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001841 }
Mike Dayae3a7042013-09-05 14:41:35 -04001842 ptr = ramblock_ptr(block, addr - block->offset);
1843
Mike Day0dc3f442013-09-05 14:41:35 -04001844unlock:
1845 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001846 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001847}
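/* Illustrative sketch (not in QEMU): per the comment above, a caller
 * that is not under the iothread lock can pin the host pointer by
 * opening its own RCU critical section around both the lookup and
 * the access.
 */
static inline void example_ram_access(ram_addr_t addr)
{
    void *host;

    rcu_read_lock();
    host = qemu_get_ram_ptr(addr);
    /* ... access *host here, strictly before the unlock ... */
    (void)host;
    rcu_read_unlock();
}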
1848
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001849/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001850 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001851 *
1852 * By the time this function returns, the returned pointer is not protected
1853 * by RCU anymore. If the caller is not within an RCU critical section and
1854 * does not hold the iothread lock, it must have other means of protecting the
1855 * pointer, such as a reference to the region that includes the incoming
1856 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001857 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001858static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001859{
Mike Dayae3a7042013-09-05 14:41:35 -04001860 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001861 if (*size == 0) {
1862 return NULL;
1863 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001864 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001865 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001866 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001867 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001868 rcu_read_lock();
1869 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001870 if (addr - block->offset < block->max_length) {
1871 if (addr - block->offset + *size > block->max_length)
1872 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001873 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001874 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001875 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001876 }
1877 }
1878
1879 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1880 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001881 }
1882}
1883
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001884/*
1885 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1886 * in that RAMBlock.
1887 *
1888 * ptr: Host pointer to look up
1889 * round_offset: If true round the result offset down to a page boundary
1890 * *ram_addr: set to result ram_addr
1891 * *offset: set to result offset within the RAMBlock
1892 *
1893 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001894 *
1895 * By the time this function returns, the returned pointer is not protected
1896 * by RCU anymore. If the caller is not within an RCU critical section and
1897 * does not hold the iothread lock, it must have other means of protecting the
1898 * pointer, such as a reference to the region that includes the incoming
1899 * ram_addr_t.
1900 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001901RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1902 ram_addr_t *ram_addr,
1903 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001904{
pbrook94a6b542009-04-11 17:15:54 +00001905 RAMBlock *block;
1906 uint8_t *host = ptr;
1907
Jan Kiszka868bb332011-06-21 22:59:09 +02001908 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001909 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001910 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001911 block = qemu_get_ram_block(*ram_addr);
1912 if (block) {
1913 *offset = (host - block->host);
1914 }
Mike Day0dc3f442013-09-05 14:41:35 -04001915 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001916 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001917 }
1918
Mike Day0dc3f442013-09-05 14:41:35 -04001919 rcu_read_lock();
1920 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001921 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001922 goto found;
1923 }
1924
Mike Day0dc3f442013-09-05 14:41:35 -04001925 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001926 /* This case occurs when the block is not mapped. */
1927 if (block->host == NULL) {
1928 continue;
1929 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001930 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001931 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001932 }
pbrook94a6b542009-04-11 17:15:54 +00001933 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001934
Mike Day0dc3f442013-09-05 14:41:35 -04001935 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001936 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001937
1938found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001939 *offset = (host - block->host);
1940 if (round_offset) {
1941 *offset &= TARGET_PAGE_MASK;
1942 }
1943 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001944 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001945 return block;
1946}
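/* Illustrative sketch (not in QEMU): translating a host pointer back
 * to a (block, ram_addr) pair, rounding down to a page boundary as
 * the migration code does.
 */
static inline bool example_host_to_ram(void *host, ram_addr_t *ram_addr)
{
    ram_addr_t offset;
    RAMBlock *block;

    block = qemu_ram_block_from_host(host, true, ram_addr, &offset);
    return block != NULL;
}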
1947
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001948/*
1949 * Finds the named RAMBlock
1950 *
1951 * name: The name of RAMBlock to find
1952 *
1953 * Returns: RAMBlock (or NULL if not found)
1954 */
1955RAMBlock *qemu_ram_block_by_name(const char *name)
1956{
1957 RAMBlock *block;
1958
1959 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1960 if (!strcmp(name, block->idstr)) {
1961 return block;
1962 }
1963 }
1964
1965 return NULL;
1966}
1967
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001968/* Some of the softmmu routines need to translate from a host pointer
1969 (typically a TLB entry) back to a ram offset. */
1970MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1971{
1972 RAMBlock *block;
1973 ram_addr_t offset; /* Not used */
1974
1975 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1976
1977 if (!block) {
1978 return NULL;
1979 }
1980
1981 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001982}
Alex Williamsonf471a172010-06-11 11:11:42 -06001983
Avi Kivitya8170e52012-10-23 12:30:10 +02001984static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001985 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001986{
Juan Quintela52159192013-10-08 12:44:04 +02001987 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001988 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001989 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001990 switch (size) {
1991 case 1:
1992 stb_p(qemu_get_ram_ptr(ram_addr), val);
1993 break;
1994 case 2:
1995 stw_p(qemu_get_ram_ptr(ram_addr), val);
1996 break;
1997 case 4:
1998 stl_p(qemu_get_ram_ptr(ram_addr), val);
1999 break;
2000 default:
2001 abort();
2002 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002003 /* Set both VGA and migration bits for simplicity and to remove
2004 * the notdirty callback faster.
2005 */
2006 cpu_physical_memory_set_dirty_range(ram_addr, size,
2007 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002008 /* we remove the notdirty callback only if the code has been
2009 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002010 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002011 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002012 }
bellard1ccde1c2004-02-06 19:46:14 +00002013}
2014
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002015static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2016 unsigned size, bool is_write)
2017{
2018 return is_write;
2019}
2020
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002021static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002022 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002023 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002024 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002025};
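/* Net effect (summary, not in the original file): the first write to
 * a clean page is routed through notdirty_mem_write(), which flushes
 * any TBs translated from that page, performs the store, and sets the
 * VGA/migration dirty bits; tlb_set_dirty() then restores a direct
 * TLB entry, so later stores to the page bypass this callback.
 */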
2026
pbrook0f459d12008-06-09 00:20:13 +00002027/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002028static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002029{
Andreas Färber93afead2013-08-26 03:41:01 +02002030 CPUState *cpu = current_cpu;
2031 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002032 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002033 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002034 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002035 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002036
Andreas Färberff4700b2013-08-26 18:23:18 +02002037 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002038 /* We re-entered the check after replacing the TB. Now raise
2039 * the debug interrupt so that it will trigger after the
2040 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002041 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002042 return;
2043 }
Andreas Färber93afead2013-08-26 03:41:01 +02002044 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002045 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002046 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2047 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002048 if (flags == BP_MEM_READ) {
2049 wp->flags |= BP_WATCHPOINT_HIT_READ;
2050 } else {
2051 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2052 }
2053 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002054 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002055 if (!cpu->watchpoint_hit) {
2056 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002057 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002058 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002059 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002060 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002061 } else {
2062 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002063 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002064 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002065 }
aliguori06d55cc2008-11-18 20:24:06 +00002066 }
aliguori6e140f22008-11-18 20:37:55 +00002067 } else {
2068 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002069 }
2070 }
2071}
2072
pbrook6658ffb2007-03-16 23:58:11 +00002073/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2074 so these check for a hit then pass through to the normal out-of-line
2075 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002076static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2077 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002078{
Peter Maydell66b9b432015-04-26 16:49:24 +01002079 MemTxResult res;
2080 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002081
Peter Maydell66b9b432015-04-26 16:49:24 +01002082 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002083 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002084 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002085 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002086 break;
2087 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002088 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002089 break;
2090 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002091 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002092 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002093 default: abort();
2094 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002095 *pdata = data;
2096 return res;
2097}
2098
2099static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2100 uint64_t val, unsigned size,
2101 MemTxAttrs attrs)
2102{
2103 MemTxResult res;
2104
2105 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2106 switch (size) {
2107 case 1:
2108 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2109 break;
2110 case 2:
2111 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2112 break;
2113 case 4:
2114 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2115 break;
2116 default: abort();
2117 }
2118 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002119}
2120
Avi Kivity1ec9b902012-01-02 12:47:48 +02002121static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002122 .read_with_attrs = watch_mem_read,
2123 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002124 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002125};
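/* Summary (not in the original file): pages containing a watchpoint
 * get TLB entries that point at io_mem_watch, so every load or store
 * first passes through check_watchpoint() and is then forwarded to
 * address_space_memory at its original address.
 */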
pbrook6658ffb2007-03-16 23:58:11 +00002126
Peter Maydellf25a49e2015-04-26 16:49:24 +01002127static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2128 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002129{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002130 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002131 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002132 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002133
blueswir1db7b5422007-05-26 17:36:03 +00002134#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002135 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002136 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002137#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002138 res = address_space_read(subpage->as, addr + subpage->base,
2139 attrs, buf, len);
2140 if (res) {
2141 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002142 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002143 switch (len) {
2144 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002145 *data = ldub_p(buf);
2146 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002147 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002148 *data = lduw_p(buf);
2149 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002150 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002151 *data = ldl_p(buf);
2152 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002153 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002154 *data = ldq_p(buf);
2155 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002156 default:
2157 abort();
2158 }
blueswir1db7b5422007-05-26 17:36:03 +00002159}
2160
Peter Maydellf25a49e2015-04-26 16:49:24 +01002161static MemTxResult subpage_write(void *opaque, hwaddr addr,
2162 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002163{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002164 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002165 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002166
blueswir1db7b5422007-05-26 17:36:03 +00002167#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002168 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002169 " value %"PRIx64"\n",
2170 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002171#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002172 switch (len) {
2173 case 1:
2174 stb_p(buf, value);
2175 break;
2176 case 2:
2177 stw_p(buf, value);
2178 break;
2179 case 4:
2180 stl_p(buf, value);
2181 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002182 case 8:
2183 stq_p(buf, value);
2184 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002185 default:
2186 abort();
2187 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002188 return address_space_write(subpage->as, addr + subpage->base,
2189 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002190}
2191
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002192static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002193 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002194{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002195 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002196#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002197 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002198 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002199#endif
2200
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002201 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002202 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002203}
2204
Avi Kivity70c68e42012-01-02 12:32:48 +02002205static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002206 .read_with_attrs = subpage_read,
2207 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002208 .impl.min_access_size = 1,
2209 .impl.max_access_size = 8,
2210 .valid.min_access_size = 1,
2211 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002212 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002213 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002214};
2215
Anthony Liguoric227f092009-10-01 16:12:16 -05002216static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002217 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002218{
2219 int idx, eidx;
2220
2221 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2222 return -1;
2223 idx = SUBPAGE_IDX(start);
2224 eidx = SUBPAGE_IDX(end);
2225#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002226 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2227 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002228#endif
blueswir1db7b5422007-05-26 17:36:03 +00002229 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002230 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002231 }
2232
2233 return 0;
2234}
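/* Worked example (illustrative, assumes 4 KiB pages and that
 * SUBPAGE_IDX() is the offset within the page): mapping a 512-byte
 * region at offset 0x200 within a page ends up as
 * subpage_register(mmio, 0x200, 0x3ff, section), which points
 * sub_section[0x200] through sub_section[0x3ff] at that section
 * index while the rest of the page keeps its previous mapping.
 */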
2235
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002236static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002237{
Anthony Liguoric227f092009-10-01 16:12:16 -05002238 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002239
Anthony Liguori7267c092011-08-20 22:09:37 -05002240 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002241
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002242 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002243 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002244 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002245 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002246 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002247#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002248 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2249 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002250#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002251 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002252
2253 return mmio;
2254}
2255
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002256static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2257 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002258{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002259 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002260 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002261 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002262 .mr = mr,
2263 .offset_within_address_space = 0,
2264 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002265 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002266 };
2267
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002268 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002269}
2270
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002271MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002272{
Peter Maydell32857f42015-10-01 15:29:50 +01002273 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2274 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002275 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002276
2277 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002278}
2279
Avi Kivitye9179ce2009-06-14 11:38:52 +03002280static void io_mem_init(void)
2281{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002282 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002283 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002284 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002285 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002286 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002287 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002288 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002289}
2290
Avi Kivityac1970f2012-10-03 16:22:53 +02002291static void mem_begin(MemoryListener *listener)
2292{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002293 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002294 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2295 uint16_t n;
2296
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002297 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002298 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002299 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002300 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002301 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002302 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002303 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002304 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002305
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002306 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002307 d->as = as;
2308 as->next_dispatch = d;
2309}
2310
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002311static void address_space_dispatch_free(AddressSpaceDispatch *d)
2312{
2313 phys_sections_free(&d->map);
2314 g_free(d);
2315}
2316
Paolo Bonzini00752702013-05-29 12:13:54 +02002317static void mem_commit(MemoryListener *listener)
2318{
2319 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002320 AddressSpaceDispatch *cur = as->dispatch;
2321 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002322
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002323 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002324
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002325 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002326 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002327 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002328 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002329}
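
/* The update in mem_commit() is the plain RCU publish-and-reclaim idiom:
 * build a complete new AddressSpaceDispatch, publish it with one atomic
 * pointer store, and defer freeing the old one until readers are done.
 * A minimal sketch of the same idiom (illustrative only, not part of this
 * file; "MyTable" is hypothetical and assumed to embed a struct rcu_head
 * member named "rcu"):
 *
 *     MyTable *next = build_new_table();
 *     MyTable *cur = table;
 *     atomic_rcu_set(&table, next);          // readers now see the new table
 *     if (cur) {
 *         call_rcu(cur, my_table_free, rcu); // freed after a grace period
 *     }
 *
 * Readers pair this with rcu_read_lock()/atomic_rcu_read()/rcu_read_unlock(),
 * as tcg_commit() and address_space_rw() do below.
 */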

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
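
/* Worked example for memory_access_size() (illustrative values only): for a
 * region with valid.max_access_size == 8 and impl.unaligned == false, a
 * request of l == 8 at addr == 0x1006 is bounded by the address alignment:
 * 0x1006 & -0x1006 == 2, so access_size_max drops to 2 and the function
 * returns pow2floor(2) == 2.  The caller then issues a 2-byte access and
 * loops over the remaining bytes.
 */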

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
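
/* Callers use prepare_mmio_access() in the pattern below (a sketch of the
 * idiom used throughout this file, not additional API):
 *
 *     bool release_lock = false;
 *     ...
 *     release_lock |= prepare_mmio_access(mr);
 *     ... dispatch the MMIO read or write ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *         release_lock = false;
 *     }
 *
 * i.e. the iothread lock is taken at most once per access and is always
 * released by the same loop iteration that took it.
 */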

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}
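
/* Typical caller pattern for address_space_rw() (illustrative sketch;
 * "dev_as" and the guest address are hypothetical):
 *
 *     uint8_t buf[4];
 *     MemTxResult r = address_space_rw(dev_as, 0x1000,
 *                                      MEMTXATTRS_UNSPECIFIED,
 *                                      buf, sizeof(buf), false);
 *     if (r != MEMTX_OK) {
 *         // the access faulted or decoded to nothing; buf may be partial
 *     }
 *
 * Note that the results of all loop iterations are OR-ed together, so a
 * multi-fragment transfer reports failure if any fragment failed.
 */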

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
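
/* Sketch of the intended map-client flow (illustrative only; "retry_dma"
 * and the device state "s" are hypothetical):
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         // try address_space_map() again now that the bounce buffer
 *         // may have been released
 *     }
 *
 *     QEMUBH *bh = qemu_bh_new(retry_dma, s);
 *     if (!address_space_map(as, addr, &plen, is_write)) {
 *         cpu_register_map_client(bh);  // bh is scheduled once retrying
 *                                       // the mapping is likely to succeed
 *     }
 */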

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* don't leak the RCU read lock on the early exit */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
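
/* Example check-before-copy pattern (a sketch; callers in this tree often
 * just issue the access and test the MemTxResult instead):
 *
 *     if (address_space_access_valid(as, addr, len, true)) {
 *         address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, buf, len);
 *     }
 *
 * The check and the access are not atomic with respect to concurrent memory
 * map changes, so the access can still fault; treat the check as advisory.
 */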

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
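
/* Typical zero-copy DMA pattern built on the two functions above (a sketch;
 * "fill_from_device", "gpa" and "size" are hypothetical):
 *
 *     hwaddr plen = size;
 *     void *host = address_space_map(as, gpa, &plen, true);
 *     if (host) {
 *         fill_from_device(host, plen);   // plen may come back < size
 *         address_space_unmap(as, host, plen, true, plen);
 *     }
 *
 * Passing the bytes actually touched as access_len keeps dirty tracking
 * precise when the caller used less than the mapped window.
 */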

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
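
/* The notdirty store is intended for writes the guest should not observe as
 * code modification, e.g. accessed/dirty bits that target code writes back
 * into guest page tables.  Hypothetical sketch of such a caller:
 *
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 *     // DIRTY_MEMORY_CODE is masked out, so no TB invalidation happens;
 *     // the migration and VGA dirty bits are still set as usual
 *
 * A plain stl_phys() here could invalidate any TBs located on the
 * page-table page.
 */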
3278
bellard8df1cd02005-01-28 22:37:22 +00003279/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003280static inline void address_space_stl_internal(AddressSpace *as,
3281 hwaddr addr, uint32_t val,
3282 MemTxAttrs attrs,
3283 MemTxResult *result,
3284 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003285{
bellard8df1cd02005-01-28 22:37:22 +00003286 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003287 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003288 hwaddr l = 4;
3289 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003290 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003291 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003292
Paolo Bonzini41063e12015-03-18 14:21:43 +01003293 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003294 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003295 true);
3296 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003297 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003298
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003299#if defined(TARGET_WORDS_BIGENDIAN)
3300 if (endian == DEVICE_LITTLE_ENDIAN) {
3301 val = bswap32(val);
3302 }
3303#else
3304 if (endian == DEVICE_BIG_ENDIAN) {
3305 val = bswap32(val);
3306 }
3307#endif
Peter Maydell50013112015-04-26 16:49:24 +01003308 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003309 } else {
bellard8df1cd02005-01-28 22:37:22 +00003310 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003311 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003312 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003313 switch (endian) {
3314 case DEVICE_LITTLE_ENDIAN:
3315 stl_le_p(ptr, val);
3316 break;
3317 case DEVICE_BIG_ENDIAN:
3318 stl_be_p(ptr, val);
3319 break;
3320 default:
3321 stl_p(ptr, val);
3322 break;
3323 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003324 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003325 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003326 }
Peter Maydell50013112015-04-26 16:49:24 +01003327 if (result) {
3328 *result = r;
3329 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003330 if (release_lock) {
3331 qemu_mutex_unlock_iothread();
3332 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003333 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003334}
3335
3336void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3337 MemTxAttrs attrs, MemTxResult *result)
3338{
3339 address_space_stl_internal(as, addr, val, attrs, result,
3340 DEVICE_NATIVE_ENDIAN);
3341}
3342
3343void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3344 MemTxAttrs attrs, MemTxResult *result)
3345{
3346 address_space_stl_internal(as, addr, val, attrs, result,
3347 DEVICE_LITTLE_ENDIAN);
3348}
3349
3350void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3351 MemTxAttrs attrs, MemTxResult *result)
3352{
3353 address_space_stl_internal(as, addr, val, attrs, result,
3354 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003355}
3356
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003357void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003358{
Peter Maydell50013112015-04-26 16:49:24 +01003359 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003360}
3361
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003362void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003363{
Peter Maydell50013112015-04-26 16:49:24 +01003364 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003365}
3366
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003367void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003368{
Peter Maydell50013112015-04-26 16:49:24 +01003369 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003370}
3371
bellardaab33092005-10-30 20:48:42 +00003372/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003373void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3374 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003375{
3376 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003377 MemTxResult r;
3378
3379 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3380 if (result) {
3381 *result = r;
3382 }
3383}
3384
3385void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3386{
3387 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003388}
3389
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003390/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003391static inline void address_space_stw_internal(AddressSpace *as,
3392 hwaddr addr, uint32_t val,
3393 MemTxAttrs attrs,
3394 MemTxResult *result,
3395 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003396{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003397 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003398 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003399 hwaddr l = 2;
3400 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003401 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003402 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003403
Paolo Bonzini41063e12015-03-18 14:21:43 +01003404 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003405 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003406 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003407 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003408
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003409#if defined(TARGET_WORDS_BIGENDIAN)
3410 if (endian == DEVICE_LITTLE_ENDIAN) {
3411 val = bswap16(val);
3412 }
3413#else
3414 if (endian == DEVICE_BIG_ENDIAN) {
3415 val = bswap16(val);
3416 }
3417#endif
Peter Maydell50013112015-04-26 16:49:24 +01003418 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003419 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003420 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003421 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003422 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003423 switch (endian) {
3424 case DEVICE_LITTLE_ENDIAN:
3425 stw_le_p(ptr, val);
3426 break;
3427 case DEVICE_BIG_ENDIAN:
3428 stw_be_p(ptr, val);
3429 break;
3430 default:
3431 stw_p(ptr, val);
3432 break;
3433 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003434 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003435 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003436 }
Peter Maydell50013112015-04-26 16:49:24 +01003437 if (result) {
3438 *result = r;
3439 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003440 if (release_lock) {
3441 qemu_mutex_unlock_iothread();
3442 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003443 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003444}
3445
3446void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3447 MemTxAttrs attrs, MemTxResult *result)
3448{
3449 address_space_stw_internal(as, addr, val, attrs, result,
3450 DEVICE_NATIVE_ENDIAN);
3451}
3452
3453void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3454 MemTxAttrs attrs, MemTxResult *result)
3455{
3456 address_space_stw_internal(as, addr, val, attrs, result,
3457 DEVICE_LITTLE_ENDIAN);
3458}
3459
3460void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3461 MemTxAttrs attrs, MemTxResult *result)
3462{
3463 address_space_stw_internal(as, addr, val, attrs, result,
3464 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003465}
3466
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003467void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003468{
Peter Maydell50013112015-04-26 16:49:24 +01003469 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003470}
3471
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003472void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003473{
Peter Maydell50013112015-04-26 16:49:24 +01003474 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003475}
3476
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003477void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003478{
Peter Maydell50013112015-04-26 16:49:24 +01003479 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003480}
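/*
 * The stw_phys wrappers above are fire-and-forget: they pass
 * MEMTXATTRS_UNSPECIFIED and discard the MemTxResult.  For example,
 *
 *     stw_le_phys(as, addr, val);
 *
 * behaves like
 *
 *     address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
 *
 * so callers that care about transaction failures should use the
 * address_space_* form and check the result.
 */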
3481
bellardaab33092005-10-30 20:48:42 +00003482/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003483void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3484 MemTxAttrs attrs, MemTxResult *result)
3485{
3486 MemTxResult r;
3487 val = tswap64(val);
3488 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3489 if (result) {
3490 *result = r;
3491 }
3492}
3493
3494void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3495 MemTxAttrs attrs, MemTxResult *result)
3496{
3497 MemTxResult r;
3498 val = cpu_to_le64(val);
3499 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3500 if (result) {
3501 *result = r;
3502 }
3503}

3504void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3505 MemTxAttrs attrs, MemTxResult *result)
3506{
3507 MemTxResult r;
3508 val = cpu_to_be64(val);
3509 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3510 if (result) {
3511 *result = r;
3512 }
3513}
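/*
 * Note the XXX above: unlike the stw helpers, the stq variants do not
 * open-code the MMIO/RAM split themselves.  Each swaps the value into
 * the requested byte order in a local and bounces the 8 bytes through
 * address_space_rw(); address_space_stq_le() is effectively:
 *
 *     uint64_t v = cpu_to_le64(val);
 *     address_space_rw(as, addr, attrs, (void *) &v, 8, 1);
 */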
3514
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003515void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003516{
Peter Maydell50013112015-04-26 16:49:24 +01003517 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003518}
3519
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003520void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003521{
Peter Maydell50013112015-04-26 16:49:24 +01003522 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003523}
3524
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003525void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003526{
Peter Maydell50013112015-04-26 16:49:24 +01003527 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003528}
3529
aliguori5e2972f2009-03-28 17:51:36 +00003530/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003531int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003532 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003533{
3534 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003535 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003536 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003537
3538 while (len > 0) {
3539 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003540 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003541 /* if no physical page mapped, return an error */
3542 if (phys_addr == -1) {
3543     return -1;
}
3544 l = (page + TARGET_PAGE_SIZE) - addr;
3545 if (l > len) {
3546     l = len;
}
aliguori5e2972f2009-03-28 17:51:36 +00003547 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003548 if (is_write) {
3549 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3550 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003551 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3552 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003553 }
bellard13eb76e2004-01-24 15:23:36 +00003554 len -= l;
3555 buf += l;
3556 addr += l;
3557 }
3558 return 0;
3559}
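/*
 * Usage sketch for cpu_memory_rw_debug() (the CPU pointer and virtual
 * address are assumed to come from the caller, e.g. a gdbstub request):
 *
 *     uint32_t word;
 *     if (cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)&word,
 *                             sizeof(word), 0) < 0) {
 *         // no physical page was mapped at vaddr
 *     }
 */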
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003560
3561/*
3562 * Allows code that needs to deal with migration bitmaps etc to still be built
3563 * target independent.
3564 */
3565size_t qemu_target_page_bits(void)
3566{
3567 return TARGET_PAGE_BITS;
3568}
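/*
 * Example: target-independent code (e.g. migration) can derive the page
 * size without including target headers:
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     size_t pages = (len + page_size - 1) / page_size;
 */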
3569
Paul Brooka68fe892010-03-01 00:08:59 +00003570#endif
bellard13eb76e2004-01-24 15:23:36 +00003571
Blue Swirl8e4a4242013-01-06 18:30:17 +00003572/*
3573 * A helper function for the _utterly broken_ virtio device model to find out if
3574 * it's running on a big endian machine. Don't do this at home kids!
3575 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003576bool target_words_bigendian(void);
3577bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003578{
3579#if defined(TARGET_WORDS_BIGENDIAN)
3580 return true;
3581#else
3582 return false;
3583#endif
3584}
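/*
 * Illustrative caller (a sketch): legacy virtio code picks the device
 * endianness from the target, e.g.
 *
 *     if (target_words_bigendian()) {
 *         // lay out the device's fields big-endian
 *     }
 */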
3585
Wen Congyang76f35532012-05-07 12:04:18 +08003586#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003587bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003588{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003589 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003590 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003591 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003592
Paolo Bonzini41063e12015-03-18 14:21:43 +01003593 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003594 mr = address_space_translate(&address_space_memory,
3595 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003596
Paolo Bonzini41063e12015-03-18 14:21:43 +01003597 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3598 rcu_read_unlock();
3599 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003600}
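/*
 * Example (a sketch): a memory-dump style caller can use this to skip
 * pages whose reads would have device side effects:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;  // not RAM or ROMD, don't touch it
 *     }
 */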
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003601
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003602int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003603{
3604 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003605 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003606
Mike Day0dc3f442013-09-05 14:41:35 -04003607 rcu_read_lock();
3608 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003609 ret = func(block->idstr, block->host, block->offset,
3610 block->used_length, opaque);
3611 if (ret) {
3612 break;
3613 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003614 }
Mike Day0dc3f442013-09-05 14:41:35 -04003615 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003616 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003617}
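/*
 * Example iterator (a sketch; the callback's signature mirrors the call
 * above: id string, host pointer, offset, used length, opaque):
 *
 *     static int dump_block(const char *idstr, uint8_t *host_addr,
 *                           ram_addr_t offset, ram_addr_t length,
 *                           void *opaque)
 *     {
 *         printf("%s: host %p, " RAM_ADDR_FMT " bytes\n",
 *                idstr, (void *)host_addr, length);
 *         return 0;  // returning non-zero stops the walk early
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */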
Peter Maydellec3f8c92013-06-27 20:53:38 +01003618#endif