/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file. */
#define RAM_FILE       (1 << 3)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE); 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
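/* Worked example, assuming TARGET_PAGE_BITS == 12 (4 KiB target pages):
 * P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6, so a full walk consumes six
 * 9-bit slices of the 52-bit page index.  A PhysPageEntry with skip == 2
 * covers two of those levels at once (the compacted case), while skip == 0
 * marks a leaf whose ptr indexes phys_sections rather than phys_map_nodes.
 */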
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
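/* Example, with P_L2_SIZE == 512: phys_page_set(d, 0, 512, leaf) does not
 * touch 512 level-0 entries; the alignment check in phys_page_set_level()
 * notices that index 0 is aligned to step == 512 at level 1 and installs a
 * single level-1 entry with skip == 0 covering the whole range.
 */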
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
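/* For instance, if a node's only valid child itself has skip == 1, the
 * parent entry absorbs it: lp->skip becomes 2 and lp->ptr points past the
 * removed intermediate node, so phys_page_find() crosses both levels in a
 * single step.
 */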
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
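/* Lookup sketch, assuming TARGET_PAGE_BITS == 12: with P_L2_LEVELS == 6 and
 * every entry at skip == 1, the loop runs with i = 5, 4, ..., 0, consuming 9
 * bits of the page index per iteration.  A compacted entry with skip == 2
 * instead jumps over an elided level, decrementing i by 2 in one iteration.
 */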
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
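/* A "direct" access bypasses MMIO dispatch entirely: the caller may memcpy
 * straight to or from the host memory backing the region.  RAM qualifies
 * unless it is a read-only region being written; ROMD regions read like RAM
 * but their writes must take the MMIO path.
 */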
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
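/* Typical caller pattern (a sketch; see e.g. address_space_rw):
 *
 *     rcu_read_lock();
 *     l = len;
 *     mr = address_space_translate(as, addr, &addr1, &l, is_write);
 *     ... access at most l bytes of mr at offset addr1 ...
 *     rcu_read_unlock();
 *
 * The RCU read lock keeps the AddressSpaceDispatch read by
 * atomic_rcu_read() above alive for the duration of the access.
 */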
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
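/* Worked example of the wraparound case: a 4-byte watchpoint at the very top
 * of the address space has wp->vaddr == ~(vaddr)3 and wpend == ~(vaddr)0.
 * A 1-byte access at addr == ~(vaddr)0 computes addrend == ~(vaddr)0 and
 * matches, whereas comparing against addr + len would have wrapped to 0 and
 * missed the overlap.
 */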
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
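/* Usage sketch: memory_region_test_and_clear_dirty() lands here with, e.g.,
 * client == DIRTY_MEMORY_VGA, letting display code repaint only the pages
 * written since its last scan.  On the TCG path, tlb_reset_dirty_range_all()
 * re-protects the range in each CPU's soft TLB so the next write faults and
 * marks the page dirty again.
 */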
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
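/* Example of the encoding the assert above protects, assuming 4 KiB target
 * pages: memory_region_section_get_iotlb() builds a RAM iotlb as
 * (ram_addr & TARGET_PAGE_MASK) | PHYS_SECTION_NOTDIRTY.  The section number
 * lives entirely in the low TARGET_PAGE_BITS, so it can never corrupt the
 * page-aligned half of the value.
 */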
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
1161
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

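/*
 * Support for backing guest RAM with a file, typically on a hugetlbfs
 * mount so that the guest is mapped with huge pages (Linux only).
 */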
#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

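/*
 * Map @memory bytes of file-backed storage for @block.  If @path is a
 * directory (e.g. a hugetlbfs mount point), an unlinked temporary file
 * is created inside it; otherwise @path itself is opened.
 */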
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    struct stat st;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
        sanitized_name = g_strdup(memory_region_name(block->mr));
        for (c = sanitized_name; *c != '\0'; c++) {
            if (*c == '/') {
                *c = '_';
            }
        }

        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                   sanitized_name);
        g_free(sanitized_name);

        fd = mkstemp(filename);
        if (fd >= 0) {
            unlink(filename);
        }
        g_free(filename);
    } else {
        fd = open(path, O_RDWR | O_CREAT, 0644);
    }

    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        goto error;
    }

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    return NULL;
}
#endif

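/*
 * Pick an offset in ram_addr_t space for a new block: scan the existing
 * blocks and take the smallest gap that still fits @size, keeping the
 * layout densely packed.
 */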
/* Called with the ramlist lock held. */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* size 0 would hand out the same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

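/* Return one past the end of the highest-addressed RAMBlock. */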
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

/* Called with iothread lock held. */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held. */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest may have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to
 * detect misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

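/*
 * Link a freshly initialized RAMBlock into the global list: allocate
 * host memory if none was supplied, keep the list sorted from biggest
 * to smallest block, grow the dirty bitmaps if needed, and mark the new
 * range dirty.  Returns the block's offset in ram_addr_t space.
 */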
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock. */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

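/*
 * Allocate a file-backed RAM block (Linux only).  A caller sketch,
 * assuming a -mem-path style hugetlbfs setup (the path and the share
 * flag below are purely illustrative):
 *
 *     addr = qemu_ram_alloc_from_file(size, mr, false,
 *                                     "/dev/hugepages", &err);
 */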
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->flags |= RAM_FILE;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

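/*
 * Common back end for the qemu_ram_alloc*() variants below: a non-NULL
 * @host means caller-supplied memory (RAM_PREALLOC); @resizeable allows
 * the used length to grow later, up to @max_size.
 */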
static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

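/*
 * Usage sketch for the resizeable variant (all names below are
 * hypothetical, for illustration only):
 *
 *     static void my_resized(const char *id, uint64_t len, void *host)
 *     {
 *         // device-specific reaction to the new length
 *     }
 *
 *     offset = qemu_ram_alloc_resizeable(initial_size, max_size,
 *                                        my_resized, mr, &err);
 *
 * qemu_ram_resize() can then grow used_length up to max_size.
 */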
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        if (block->flags & RAM_FILE) {
            qemu_ram_munmap(block->host, block->max_length);
        } else {
            munmap(block->host, block->max_length);
        }
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

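/*
 * Unlink a block from the list.  The host memory is released only after
 * an RCU grace period, via reclaim_ramblock(), so readers still walking
 * the list under rcu_read_lock() remain safe.
 */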
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

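/*
 * Re-establish the mapping of a range inside a RAMBlock at its original
 * host address (used e.g. when recovering from memory errors): mmap()
 * the same host address again with flags matching the original
 * allocation, preserving file backing when the block has it.
 */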
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead.  For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    void *ptr;
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;
        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length) {
                    *size = block->max_length - addr + block->offset;
                }
                ptr = ramblock_ptr(block, addr - block->offset);
                rcu_read_unlock();
                return ptr;
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;
    MemoryRegion *mr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        mr = qemu_get_ram_block(*ram_addr)->mr;
        rcu_read_unlock();
        return mr;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    mr = block->mr;
    rcu_read_unlock();
    return mr;
}

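/*
 * The notdirty region backs RAM pages that still contain translated
 * code: a write first invalidates any TBs on the page, then performs
 * the store and marks the page dirty so the slow path can be dropped
 * once no code remains there.
 */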
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

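/*
 * Subpage accesses bounce through the owning AddressSpace with the
 * subpage base added back in, so a page that mixes several sections
 * still routes each access to the right MemoryRegion.
 */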
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

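/*
 * Build a catch-all MemoryRegionSection spanning the whole address
 * space for one of the special I/O regions and register it, returning
 * its section index.
 */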
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

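/*
 * Start building a new dispatch table for @as: seed it with the fixed
 * sections whose indexes the TLB code relies on (PHYS_SECTION_*), then
 * let mem_add() populate it region by region; mem_commit() publishes
 * the finished table.
 */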
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

Avi Kivity1d711482012-10-02 18:54:45 +02002276static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002277{
Peter Maydell32857f42015-10-01 15:29:50 +01002278 CPUAddressSpace *cpuas;
2279 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002280
2281    /* Since each CPU stores RAM addresses in its TLB cache, we must
2282       reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002283 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2284 cpu_reloading_memory_map();
2285 /* The CPU and TLB are protected by the iothread lock.
2286 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2287 * may have split the RCU critical section.
2288 */
2289 d = atomic_rcu_read(&cpuas->as->dispatch);
2290 cpuas->memory_dispatch = d;
2291 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002292}
2293
Avi Kivityac1970f2012-10-03 16:22:53 +02002294void address_space_init_dispatch(AddressSpace *as)
2295{
Paolo Bonzini00752702013-05-29 12:13:54 +02002296 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002297 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002298 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002299 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002300 .region_add = mem_add,
2301 .region_nop = mem_add,
2302 .priority = 0,
2303 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002304 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002305}
2306
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002307void address_space_unregister(AddressSpace *as)
2308{
2309 memory_listener_unregister(&as->dispatch_listener);
2310}
2311
Avi Kivity83f3c252012-10-07 12:59:55 +02002312void address_space_destroy_dispatch(AddressSpace *as)
2313{
2314 AddressSpaceDispatch *d = as->dispatch;
2315
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002316 atomic_rcu_set(&as->dispatch, NULL);
2317 if (d) {
2318 call_rcu(d, address_space_dispatch_free, rcu);
2319 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002320}
2321
Avi Kivity62152b82011-07-26 14:26:14 +03002322static void memory_map_init(void)
2323{
Anthony Liguori7267c092011-08-20 22:09:37 -05002324 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002325
Paolo Bonzini57271d62013-11-07 17:14:37 +01002326 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002327 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002328
Anthony Liguori7267c092011-08-20 22:09:37 -05002329 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002330 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2331 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002332 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002333}
2334
2335MemoryRegion *get_system_memory(void)
2336{
2337 return system_memory;
2338}
2339
Avi Kivity309cb472011-08-08 16:09:03 +03002340MemoryRegion *get_system_io(void)
2341{
2342 return system_io;
2343}
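/*
 * Illustrative sketch, not part of this file: board code typically grabs
 * the system memory region created above and maps RAM into it.  The
 * region name, base address and size below are hypothetical.
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "board.ram", 128 * 1024 * 1024,
 *                            &error_abort);
 *     memory_region_add_subregion(get_system_memory(), 0x40000000, ram);
 */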
2344
pbrooke2eef172008-06-08 01:09:01 +00002345#endif /* !defined(CONFIG_USER_ONLY) */
2346
bellard13eb76e2004-01-24 15:23:36 +00002347/* physical memory access (slow version, mainly for debug) */
2348#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002349int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002350 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002351{
2352 int l, flags;
2353 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002354    void *p;
bellard13eb76e2004-01-24 15:23:36 +00002355
2356 while (len > 0) {
2357 page = addr & TARGET_PAGE_MASK;
2358 l = (page + TARGET_PAGE_SIZE) - addr;
2359 if (l > len)
2360 l = len;
2361 flags = page_get_flags(page);
2362 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002363 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002364 if (is_write) {
2365 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002366 return -1;
bellard579a97f2007-11-11 14:26:47 +00002367 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002368 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002369 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002370 memcpy(p, buf, l);
2371 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002372 } else {
2373 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002374 return -1;
bellard579a97f2007-11-11 14:26:47 +00002375 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002376 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002377 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002378 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002379 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002380 }
2381 len -= l;
2382 buf += l;
2383 addr += l;
2384 }
Paul Brooka68fe892010-03-01 00:08:59 +00002385 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002386}
bellard8df1cd02005-01-28 22:37:22 +00002387
bellard13eb76e2004-01-24 15:23:36 +00002388#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002389
Paolo Bonzini845b6212015-03-23 11:45:53 +01002390static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002391 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002392{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002393 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2394 /* No early return if dirty_log_mask is or becomes 0, because
2395 * cpu_physical_memory_set_dirty_range will still call
2396 * xen_modified_memory.
2397 */
2398 if (dirty_log_mask) {
2399 dirty_log_mask =
2400 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002401 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002402 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2403 tb_invalidate_phys_range(addr, addr + length);
2404 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2405 }
2406 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002407}
2408
Richard Henderson23326162013-07-08 14:55:59 -07002409static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002410{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002411 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002412
2413 /* Regions are assumed to support 1-4 byte accesses unless
2414 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002415 if (access_size_max == 0) {
2416 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002417 }
Richard Henderson23326162013-07-08 14:55:59 -07002418
2419 /* Bound the maximum access by the alignment of the address. */
2420 if (!mr->ops->impl.unaligned) {
2421 unsigned align_size_max = addr & -addr;
2422 if (align_size_max != 0 && align_size_max < access_size_max) {
2423 access_size_max = align_size_max;
2424 }
2425 }
2426
2427 /* Don't attempt accesses larger than the maximum. */
2428 if (l > access_size_max) {
2429 l = access_size_max;
2430 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002431 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002432
2433 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002434}
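/*
 * Illustrative sketch, not part of this file: the limits consulted above
 * come from a device's MemoryRegionOps.  A hypothetical device that only
 * implements aligned 2- and 4-byte accesses would declare:
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 2,
 *         .valid.max_access_size = 4,
 *         .impl.unaligned = false,
 *     };
 */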
2435
Jan Kiszka4840f102015-06-18 18:47:22 +02002436static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002437{
Jan Kiszka4840f102015-06-18 18:47:22 +02002438 bool unlocked = !qemu_mutex_iothread_locked();
2439 bool release_lock = false;
2440
2441 if (unlocked && mr->global_locking) {
2442 qemu_mutex_lock_iothread();
2443 unlocked = false;
2444 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002445 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002446 if (mr->flush_coalesced_mmio) {
2447 if (unlocked) {
2448 qemu_mutex_lock_iothread();
2449 }
2450 qemu_flush_coalesced_mmio_buffer();
2451 if (unlocked) {
2452 qemu_mutex_unlock_iothread();
2453 }
2454 }
2455
2456 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002457}
2458
Peter Maydell5c9eb022015-04-26 16:49:24 +01002459MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2460 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002461{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002462 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002463 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002464 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002465 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002466 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002467 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002468 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002469
Paolo Bonzini41063e12015-03-18 14:21:43 +01002470 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002471 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002472 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002473 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002474
bellard13eb76e2004-01-24 15:23:36 +00002475 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002476 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002477 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002478 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002479 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002480 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002481 switch (l) {
2482 case 8:
2483 /* 64 bit write access */
2484 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002485 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2486 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002487 break;
2488 case 4:
bellard1c213d12005-09-03 10:49:04 +00002489 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002490 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002491 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2492 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002493 break;
2494 case 2:
bellard1c213d12005-09-03 10:49:04 +00002495 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002496 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002497 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2498 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002499 break;
2500 case 1:
bellard1c213d12005-09-03 10:49:04 +00002501 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002502 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002503 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2504 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002505 break;
2506 default:
2507 abort();
bellard13eb76e2004-01-24 15:23:36 +00002508 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002509 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002510 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002511 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002512 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002513 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002514 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002515 }
2516 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002517 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002518 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002519 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002520 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002521 switch (l) {
2522 case 8:
2523 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002524 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2525 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002526 stq_p(buf, val);
2527 break;
2528 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002529 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002530 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2531 attrs);
bellardc27004e2005-01-03 23:35:10 +00002532 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002533 break;
2534 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002535 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002536 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2537 attrs);
bellardc27004e2005-01-03 23:35:10 +00002538 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002539 break;
2540 case 1:
bellard1c213d12005-09-03 10:49:04 +00002541 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002542 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2543 attrs);
bellardc27004e2005-01-03 23:35:10 +00002544 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002545 break;
2546 default:
2547 abort();
bellard13eb76e2004-01-24 15:23:36 +00002548 }
2549 } else {
2550 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002551 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002552 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002553 }
2554 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002555
2556 if (release_lock) {
2557 qemu_mutex_unlock_iothread();
2558 release_lock = false;
2559 }
2560
bellard13eb76e2004-01-24 15:23:36 +00002561 len -= l;
2562 buf += l;
2563 addr += l;
2564 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002565 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002566
Peter Maydell3b643492015-04-26 16:49:23 +01002567 return result;
bellard13eb76e2004-01-24 15:23:36 +00002568}
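/*
 * Illustrative sketch, not part of this file: a DMA-capable device model
 * could fetch a descriptor from guest memory with address_space_rw() and
 * check the transaction result.  "desc_addr" and the descriptor size are
 * hypothetical.
 *
 *     uint8_t desc[16];
 *     MemTxResult res = address_space_rw(&address_space_memory, desc_addr,
 *                                        MEMTXATTRS_UNSPECIFIED, desc,
 *                                        sizeof(desc), false);
 *     if (res != MEMTX_OK) {
 *         qemu_log_mask(LOG_GUEST_ERROR, "descriptor fetch failed\n");
 *     }
 */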
bellard8df1cd02005-01-28 22:37:22 +00002569
Peter Maydell5c9eb022015-04-26 16:49:24 +01002570MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2571 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002572{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002573 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002574}
2575
Peter Maydell5c9eb022015-04-26 16:49:24 +01002576MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2577 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002578{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002579 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002580}
2581
2582
Avi Kivitya8170e52012-10-23 12:30:10 +02002583void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002584 int len, int is_write)
2585{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002586 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2587 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002588}
2589
Alexander Graf582b55a2013-12-11 14:17:44 +01002590enum write_rom_type {
2591 WRITE_DATA,
2592 FLUSH_CACHE,
2593};
2594
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002595static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002596 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002597{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002598 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002599 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002600 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002601 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002602
Paolo Bonzini41063e12015-03-18 14:21:43 +01002603 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002604 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002605 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002606 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002607
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002608 if (!(memory_region_is_ram(mr) ||
2609 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002610 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002611 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002612 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002613 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002614 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002615 switch (type) {
2616 case WRITE_DATA:
2617 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002618 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002619 break;
2620 case FLUSH_CACHE:
2621 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2622 break;
2623 }
bellardd0ecd2a2006-04-23 17:14:48 +00002624 }
2625 len -= l;
2626 buf += l;
2627 addr += l;
2628 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002629 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002630}
2631
Alexander Graf582b55a2013-12-11 14:17:44 +01002632/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002633void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002634 const uint8_t *buf, int len)
2635{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002636 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002637}
2638
2639void cpu_flush_icache_range(hwaddr start, int len)
2640{
2641 /*
2642 * This function should do the same thing as an icache flush that was
2643 * triggered from within the guest. For TCG we are always cache coherent,
2644 * so there is no need to flush anything. For KVM / Xen we need to flush
2645 * the host's instruction cache at least.
2646 */
2647 if (tcg_enabled()) {
2648 return;
2649 }
2650
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002651 cpu_physical_memory_write_rom_internal(&address_space_memory,
2652 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002653}
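/*
 * Illustrative sketch, not part of this file: a firmware loader that
 * patches executable code in place would pair the two helpers above.
 * The address, blob and length are hypothetical.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0x100, blob, len);
 *     cpu_flush_icache_range(0x100, len);
 */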
2654
aliguori6d16c2f2009-01-22 16:59:11 +00002655typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002656 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002657 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002658 hwaddr addr;
2659 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002660 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002661} BounceBuffer;
2662
2663static BounceBuffer bounce;
2664
aliguoriba223c22009-01-22 16:59:16 +00002665typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002666 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002667 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002668} MapClient;
2669
Fam Zheng38e047b2015-03-16 17:03:35 +08002670QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002671static QLIST_HEAD(map_client_list, MapClient) map_client_list
2672 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002673
Fam Zhenge95205e2015-03-16 17:03:37 +08002674static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002675{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002676 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002677 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002678}
2679
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002680static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002681{
2682 MapClient *client;
2683
Blue Swirl72cf2d42009-09-12 07:36:22 +00002684 while (!QLIST_EMPTY(&map_client_list)) {
2685 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002686 qemu_bh_schedule(client->bh);
2687 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002688 }
2689}
2690
Fam Zhenge95205e2015-03-16 17:03:37 +08002691void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002692{
2693 MapClient *client = g_malloc(sizeof(*client));
2694
Fam Zheng38e047b2015-03-16 17:03:35 +08002695 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002696 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002697 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002698 if (!atomic_read(&bounce.in_use)) {
2699 cpu_notify_map_clients_locked();
2700 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002701 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002702}
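/*
 * Illustrative sketch, not part of this file: a caller that saw
 * address_space_map() fail can queue a bottom half and retry once the
 * bounce buffer is free again.  "retry_dma_bh" and "opaque" are
 * hypothetical.
 *
 *     QEMUBH *bh = qemu_bh_new(retry_dma_bh, opaque);
 *     cpu_register_map_client(bh);
 */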
2703
Fam Zheng38e047b2015-03-16 17:03:35 +08002704void cpu_exec_init_all(void)
2705{
2706 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002707 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002708 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002709 qemu_mutex_init(&map_client_list_lock);
2710}
2711
Fam Zhenge95205e2015-03-16 17:03:37 +08002712void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002713{
Fam Zhenge95205e2015-03-16 17:03:37 +08002714 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002715
Fam Zhenge95205e2015-03-16 17:03:37 +08002716 qemu_mutex_lock(&map_client_list_lock);
2717 QLIST_FOREACH(client, &map_client_list, link) {
2718 if (client->bh == bh) {
2719 cpu_unregister_map_client_do(client);
2720 break;
2721 }
2722 }
2723 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002724}
2725
2726static void cpu_notify_map_clients(void)
2727{
Fam Zheng38e047b2015-03-16 17:03:35 +08002728 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002729 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002730 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002731}
2732
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002733bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2734{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002735 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002736 hwaddr l, xlat;
2737
Paolo Bonzini41063e12015-03-18 14:21:43 +01002738 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002739 while (len > 0) {
2740 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002741 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2742 if (!memory_access_is_direct(mr, is_write)) {
2743 l = memory_access_size(mr, l, addr);
2744 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* do not leak the RCU read lock on the error path */
                rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002745                return false;
2746 }
2747 }
2748
2749 len -= l;
2750 addr += l;
2751 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002752 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002753 return true;
2754}
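/*
 * Illustrative sketch, not part of this file: callers can probe a range
 * before committing to a transfer.  The address is hypothetical.
 *
 *     if (!address_space_access_valid(&address_space_memory, addr, 4, true)) {
 *         return;  // fault: the range is not fully writable
 *     }
 */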
2755
aliguori6d16c2f2009-01-22 16:59:11 +00002756/* Map a physical memory region into a host virtual address.
2757 * May map a subset of the requested range, given by and returned in *plen.
2758 * May return NULL if resources needed to perform the mapping are exhausted.
2759 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002760 * Use cpu_register_map_client() to know when retrying the map operation is
2761 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002762 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002763void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002764 hwaddr addr,
2765 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002766 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002767{
Avi Kivitya8170e52012-10-23 12:30:10 +02002768 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002769 hwaddr done = 0;
2770 hwaddr l, xlat, base;
2771 MemoryRegion *mr, *this_mr;
2772 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002773
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002774 if (len == 0) {
2775 return NULL;
2776 }
aliguori6d16c2f2009-01-22 16:59:11 +00002777
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002778 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002779 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002780 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002781
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002782 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002783 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002784 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002785 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002786 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002787 /* Avoid unbounded allocations */
2788 l = MIN(l, TARGET_PAGE_SIZE);
2789 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002790 bounce.addr = addr;
2791 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002792
2793 memory_region_ref(mr);
2794 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002795 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002796 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2797 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002798 }
aliguori6d16c2f2009-01-22 16:59:11 +00002799
Paolo Bonzini41063e12015-03-18 14:21:43 +01002800 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002801 *plen = l;
2802 return bounce.buffer;
2803 }
2804
2805 base = xlat;
2806 raddr = memory_region_get_ram_addr(mr);
2807
2808 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002809 len -= l;
2810 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002811 done += l;
2812 if (len == 0) {
2813 break;
2814 }
2815
2816 l = len;
2817 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2818 if (this_mr != mr || xlat != base + done) {
2819 break;
2820 }
aliguori6d16c2f2009-01-22 16:59:11 +00002821 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002822
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002823 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002824 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002825 *plen = done;
2826 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002827}
2828
Avi Kivityac1970f2012-10-03 16:22:53 +02002829/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002830 * Will also mark the memory as dirty if is_write == 1. access_len gives
2831 * the amount of memory that was actually read or written by the caller.
2832 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002833void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2834 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002835{
2836 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002837 MemoryRegion *mr;
2838 ram_addr_t addr1;
2839
2840 mr = qemu_ram_addr_from_host(buffer, &addr1);
2841 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002842 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002843 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002844 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002845 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002846 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002847 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002848 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002849 return;
2850 }
2851 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002852 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2853 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002854 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002855 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002856 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002857 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002858 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002859 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002860}
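/*
 * Illustrative sketch, not part of this file: the usual zero-copy pattern
 * brackets the access with map/unmap and copes with the range being
 * shortened by the map call.  Variable names are hypothetical.
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, true);
 *     if (p) {
 *         memset(p, 0, plen);   // write at most the mapped length
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */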
bellardd0ecd2a2006-04-23 17:14:48 +00002861
Avi Kivitya8170e52012-10-23 12:30:10 +02002862void *cpu_physical_memory_map(hwaddr addr,
2863 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002864 int is_write)
2865{
2866 return address_space_map(&address_space_memory, addr, plen, is_write);
2867}
2868
Avi Kivitya8170e52012-10-23 12:30:10 +02002869void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2870 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002871{
2872 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2873}
2874
bellard8df1cd02005-01-28 22:37:22 +00002875/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002876static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2877 MemTxAttrs attrs,
2878 MemTxResult *result,
2879 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002880{
bellard8df1cd02005-01-28 22:37:22 +00002881 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002882 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002883 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002884 hwaddr l = 4;
2885 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002886 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002887 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002888
Paolo Bonzini41063e12015-03-18 14:21:43 +01002889 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002890 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002891 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002892 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002893
bellard8df1cd02005-01-28 22:37:22 +00002894 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002895 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002896#if defined(TARGET_WORDS_BIGENDIAN)
2897 if (endian == DEVICE_LITTLE_ENDIAN) {
2898 val = bswap32(val);
2899 }
2900#else
2901 if (endian == DEVICE_BIG_ENDIAN) {
2902 val = bswap32(val);
2903 }
2904#endif
bellard8df1cd02005-01-28 22:37:22 +00002905 } else {
2906 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002907 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002908 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002909 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002910 switch (endian) {
2911 case DEVICE_LITTLE_ENDIAN:
2912 val = ldl_le_p(ptr);
2913 break;
2914 case DEVICE_BIG_ENDIAN:
2915 val = ldl_be_p(ptr);
2916 break;
2917 default:
2918 val = ldl_p(ptr);
2919 break;
2920 }
Peter Maydell50013112015-04-26 16:49:24 +01002921 r = MEMTX_OK;
2922 }
2923 if (result) {
2924 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002925 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002926 if (release_lock) {
2927 qemu_mutex_unlock_iothread();
2928 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002929 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002930 return val;
2931}
2932
Peter Maydell50013112015-04-26 16:49:24 +01002933uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2934 MemTxAttrs attrs, MemTxResult *result)
2935{
2936 return address_space_ldl_internal(as, addr, attrs, result,
2937 DEVICE_NATIVE_ENDIAN);
2938}
2939
2940uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2941 MemTxAttrs attrs, MemTxResult *result)
2942{
2943 return address_space_ldl_internal(as, addr, attrs, result,
2944 DEVICE_LITTLE_ENDIAN);
2945}
2946
2947uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2948 MemTxAttrs attrs, MemTxResult *result)
2949{
2950 return address_space_ldl_internal(as, addr, attrs, result,
2951 DEVICE_BIG_ENDIAN);
2952}
2953
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002954uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002955{
Peter Maydell50013112015-04-26 16:49:24 +01002956 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002957}
2958
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002959uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002960{
Peter Maydell50013112015-04-26 16:49:24 +01002961 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002962}
2963
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002964uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002965{
Peter Maydell50013112015-04-26 16:49:24 +01002966 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002967}
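/*
 * Illustrative sketch, not part of this file: a device model reading a
 * 32-bit little-endian field from guest memory, with an explicit check
 * of the transaction result.  The guest address is hypothetical.
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, 0x1000,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         v = 0;  // treat a failed read as all-zeroes, for example
 *     }
 */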
2968
bellard84b7b8e2005-11-28 21:19:04 +00002969/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002970static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2971 MemTxAttrs attrs,
2972 MemTxResult *result,
2973 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002974{
bellard84b7b8e2005-11-28 21:19:04 +00002975 uint8_t *ptr;
2976 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002977 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002978 hwaddr l = 8;
2979 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002980 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002981 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002982
Paolo Bonzini41063e12015-03-18 14:21:43 +01002983 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002984 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002985 false);
2986 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002987 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002988
bellard84b7b8e2005-11-28 21:19:04 +00002989 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002990 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002991#if defined(TARGET_WORDS_BIGENDIAN)
2992 if (endian == DEVICE_LITTLE_ENDIAN) {
2993 val = bswap64(val);
2994 }
2995#else
2996 if (endian == DEVICE_BIG_ENDIAN) {
2997 val = bswap64(val);
2998 }
2999#endif
bellard84b7b8e2005-11-28 21:19:04 +00003000 } else {
3001 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003002 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003003 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003004 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003005 switch (endian) {
3006 case DEVICE_LITTLE_ENDIAN:
3007 val = ldq_le_p(ptr);
3008 break;
3009 case DEVICE_BIG_ENDIAN:
3010 val = ldq_be_p(ptr);
3011 break;
3012 default:
3013 val = ldq_p(ptr);
3014 break;
3015 }
Peter Maydell50013112015-04-26 16:49:24 +01003016 r = MEMTX_OK;
3017 }
3018 if (result) {
3019 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003020 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003021 if (release_lock) {
3022 qemu_mutex_unlock_iothread();
3023 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003024 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003025 return val;
3026}
3027
Peter Maydell50013112015-04-26 16:49:24 +01003028uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3029 MemTxAttrs attrs, MemTxResult *result)
3030{
3031 return address_space_ldq_internal(as, addr, attrs, result,
3032 DEVICE_NATIVE_ENDIAN);
3033}
3034
3035uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3036 MemTxAttrs attrs, MemTxResult *result)
3037{
3038 return address_space_ldq_internal(as, addr, attrs, result,
3039 DEVICE_LITTLE_ENDIAN);
3040}
3041
3042uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3043 MemTxAttrs attrs, MemTxResult *result)
3044{
3045 return address_space_ldq_internal(as, addr, attrs, result,
3046 DEVICE_BIG_ENDIAN);
3047}
3048
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003049uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003050{
Peter Maydell50013112015-04-26 16:49:24 +01003051 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003052}
3053
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003054uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003055{
Peter Maydell50013112015-04-26 16:49:24 +01003056 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003057}
3058
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003059uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003060{
Peter Maydell50013112015-04-26 16:49:24 +01003061 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003062}
3063
bellardaab33092005-10-30 20:48:42 +00003064/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003065uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3066 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003067{
3068 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003069 MemTxResult r;
3070
3071 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3072 if (result) {
3073 *result = r;
3074 }
bellardaab33092005-10-30 20:48:42 +00003075 return val;
3076}
3077
Peter Maydell50013112015-04-26 16:49:24 +01003078uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3079{
3080 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3081}
3082
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003083/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003084static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3085 hwaddr addr,
3086 MemTxAttrs attrs,
3087 MemTxResult *result,
3088 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003089{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003090 uint8_t *ptr;
3091 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003092 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003093 hwaddr l = 2;
3094 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003095 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003096 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003097
Paolo Bonzini41063e12015-03-18 14:21:43 +01003098 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003099 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003100 false);
3101 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003102 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003103
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003104 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003105 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003106#if defined(TARGET_WORDS_BIGENDIAN)
3107 if (endian == DEVICE_LITTLE_ENDIAN) {
3108 val = bswap16(val);
3109 }
3110#else
3111 if (endian == DEVICE_BIG_ENDIAN) {
3112 val = bswap16(val);
3113 }
3114#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003115 } else {
3116 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003117 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003118 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003119 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003120 switch (endian) {
3121 case DEVICE_LITTLE_ENDIAN:
3122 val = lduw_le_p(ptr);
3123 break;
3124 case DEVICE_BIG_ENDIAN:
3125 val = lduw_be_p(ptr);
3126 break;
3127 default:
3128 val = lduw_p(ptr);
3129 break;
3130 }
Peter Maydell50013112015-04-26 16:49:24 +01003131 r = MEMTX_OK;
3132 }
3133 if (result) {
3134 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003135 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003136 if (release_lock) {
3137 qemu_mutex_unlock_iothread();
3138 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003139 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003140 return val;
bellardaab33092005-10-30 20:48:42 +00003141}
3142
Peter Maydell50013112015-04-26 16:49:24 +01003143uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3144 MemTxAttrs attrs, MemTxResult *result)
3145{
3146 return address_space_lduw_internal(as, addr, attrs, result,
3147 DEVICE_NATIVE_ENDIAN);
3148}
3149
3150uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3151 MemTxAttrs attrs, MemTxResult *result)
3152{
3153 return address_space_lduw_internal(as, addr, attrs, result,
3154 DEVICE_LITTLE_ENDIAN);
3155}
3156
3157uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3158 MemTxAttrs attrs, MemTxResult *result)
3159{
3160 return address_space_lduw_internal(as, addr, attrs, result,
3161 DEVICE_BIG_ENDIAN);
3162}
3163
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003164uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003165{
Peter Maydell50013112015-04-26 16:49:24 +01003166 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003167}
3168
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003169uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003170{
Peter Maydell50013112015-04-26 16:49:24 +01003171 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003172}
3173
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003174uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003175{
Peter Maydell50013112015-04-26 16:49:24 +01003176 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003177}
3178
bellard8df1cd02005-01-28 22:37:22 +00003179/* warning: addr must be aligned. The RAM page is not marked as dirty
3180 and the code inside is not invalidated. It is useful if the dirty
3181 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003182void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3183 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003184{
bellard8df1cd02005-01-28 22:37:22 +00003185 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003186 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003187 hwaddr l = 4;
3188 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003189 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003190 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003191 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003192
Paolo Bonzini41063e12015-03-18 14:21:43 +01003193 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003194 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003195 true);
3196 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003197 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003198
Peter Maydell50013112015-04-26 16:49:24 +01003199 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003200 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003201 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003202 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003203 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003204
Paolo Bonzini845b6212015-03-23 11:45:53 +01003205 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3206 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003207 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003208 r = MEMTX_OK;
3209 }
3210 if (result) {
3211 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003212 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003213 if (release_lock) {
3214 qemu_mutex_unlock_iothread();
3215 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003216 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003217}
3218
Peter Maydell50013112015-04-26 16:49:24 +01003219void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3220{
3221 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3222}
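/*
 * Illustrative sketch, not part of this file: target code that sets the
 * accessed/dirty bits in a guest page-table entry uses the _notdirty
 * variant so the update does not retrigger dirty tracking for the page
 * holding the PTE.  "pte_addr" and the bit mask are hypothetical.
 *
 *     uint32_t pte = ldl_phys(as, pte_addr);
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(as, pte_addr, pte);
 */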
3223
bellard8df1cd02005-01-28 22:37:22 +00003224/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003225static inline void address_space_stl_internal(AddressSpace *as,
3226 hwaddr addr, uint32_t val,
3227 MemTxAttrs attrs,
3228 MemTxResult *result,
3229 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003230{
bellard8df1cd02005-01-28 22:37:22 +00003231 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003232 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003233 hwaddr l = 4;
3234 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003235 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003236 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003237
Paolo Bonzini41063e12015-03-18 14:21:43 +01003238 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003239 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003240 true);
3241 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003242 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003243
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003244#if defined(TARGET_WORDS_BIGENDIAN)
3245 if (endian == DEVICE_LITTLE_ENDIAN) {
3246 val = bswap32(val);
3247 }
3248#else
3249 if (endian == DEVICE_BIG_ENDIAN) {
3250 val = bswap32(val);
3251 }
3252#endif
Peter Maydell50013112015-04-26 16:49:24 +01003253 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003254 } else {
bellard8df1cd02005-01-28 22:37:22 +00003255 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003256 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003257 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003258 switch (endian) {
3259 case DEVICE_LITTLE_ENDIAN:
3260 stl_le_p(ptr, val);
3261 break;
3262 case DEVICE_BIG_ENDIAN:
3263 stl_be_p(ptr, val);
3264 break;
3265 default:
3266 stl_p(ptr, val);
3267 break;
3268 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003269 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003270 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003271 }
Peter Maydell50013112015-04-26 16:49:24 +01003272 if (result) {
3273 *result = r;
3274 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003275 if (release_lock) {
3276 qemu_mutex_unlock_iothread();
3277 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003278 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003279}
3280
3281void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3282 MemTxAttrs attrs, MemTxResult *result)
3283{
3284 address_space_stl_internal(as, addr, val, attrs, result,
3285 DEVICE_NATIVE_ENDIAN);
3286}
3287
3288void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3289 MemTxAttrs attrs, MemTxResult *result)
3290{
3291 address_space_stl_internal(as, addr, val, attrs, result,
3292 DEVICE_LITTLE_ENDIAN);
3293}
3294
3295void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3296 MemTxAttrs attrs, MemTxResult *result)
3297{
3298 address_space_stl_internal(as, addr, val, attrs, result,
3299 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003300}
3301
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003302void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003303{
Peter Maydell50013112015-04-26 16:49:24 +01003304 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003305}
3306
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003307void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003308{
Peter Maydell50013112015-04-26 16:49:24 +01003309 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003310}
3311
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003312void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003313{
Peter Maydell50013112015-04-26 16:49:24 +01003314 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003315}
3316
bellardaab33092005-10-30 20:48:42 +00003317/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003318void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3319 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003320{
3321 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003322 MemTxResult r;
3323
3324 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3325 if (result) {
3326 *result = r;
3327 }
3328}
3329
3330void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3331{
3332 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003333}
3334
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003335/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003336static inline void address_space_stw_internal(AddressSpace *as,
3337 hwaddr addr, uint32_t val,
3338 MemTxAttrs attrs,
3339 MemTxResult *result,
3340 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003341{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003342 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003343 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003344 hwaddr l = 2;
3345 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003346 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003347 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003348
Paolo Bonzini41063e12015-03-18 14:21:43 +01003349 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003350 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003351 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003352 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003353
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003354#if defined(TARGET_WORDS_BIGENDIAN)
3355 if (endian == DEVICE_LITTLE_ENDIAN) {
3356 val = bswap16(val);
3357 }
3358#else
3359 if (endian == DEVICE_BIG_ENDIAN) {
3360 val = bswap16(val);
3361 }
3362#endif
Peter Maydell50013112015-04-26 16:49:24 +01003363 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003364 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003365 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003366 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003367 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003368 switch (endian) {
3369 case DEVICE_LITTLE_ENDIAN:
3370 stw_le_p(ptr, val);
3371 break;
3372 case DEVICE_BIG_ENDIAN:
3373 stw_be_p(ptr, val);
3374 break;
3375 default:
3376 stw_p(ptr, val);
3377 break;
3378 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003379 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003380 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003381 }
Peter Maydell50013112015-04-26 16:49:24 +01003382 if (result) {
3383 *result = r;
3384 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003385 if (release_lock) {
3386 qemu_mutex_unlock_iothread();
3387 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003388 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003389}
3390
3391void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3392 MemTxAttrs attrs, MemTxResult *result)
3393{
3394 address_space_stw_internal(as, addr, val, attrs, result,
3395 DEVICE_NATIVE_ENDIAN);
3396}
3397
3398void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3399 MemTxAttrs attrs, MemTxResult *result)
3400{
3401 address_space_stw_internal(as, addr, val, attrs, result,
3402 DEVICE_LITTLE_ENDIAN);
3403}
3404
3405void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3406 MemTxAttrs attrs, MemTxResult *result)
3407{
3408 address_space_stw_internal(as, addr, val, attrs, result,
3409 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003410}
3411
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003412void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003413{
Peter Maydell50013112015-04-26 16:49:24 +01003414 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003415}
3416
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003417void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003418{
Peter Maydell50013112015-04-26 16:49:24 +01003419 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003420}
3421
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003422void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003423{
Peter Maydell50013112015-04-26 16:49:24 +01003424 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003425}
3426
bellardaab33092005-10-30 20:48:42 +00003427/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003428void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3429 MemTxAttrs attrs, MemTxResult *result)
3430{
3431 MemTxResult r;
3432 val = tswap64(val);
3433 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3434 if (result) {
3435 *result = r;
3436 }
3437}
3438
3439void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3440 MemTxAttrs attrs, MemTxResult *result)
3441{
3442 MemTxResult r;
3443 val = cpu_to_le64(val);
3444 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3445 if (result) {
3446 *result = r;
3447 }
3448}

3449void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3450 MemTxAttrs attrs, MemTxResult *result)
3451{
3452 MemTxResult r;
3453 val = cpu_to_be64(val);
3454 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3455 if (result) {
3456 *result = r;
3457 }
3458}
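
/*
 * Unlike the stw helpers above, the stq variants take no RAM fast path:
 * the value is byte-swapped into the required endianness up front and then
 * handed to address_space_rw() as a plain eight-byte buffer write, which
 * is why they still carry the "XXX: optimize" tag.
 */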
3459
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003460void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003461{
Peter Maydell50013112015-04-26 16:49:24 +01003462 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003463}
3464
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003465void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003466{
Peter Maydell50013112015-04-26 16:49:24 +01003467 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003468}
3469
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003470void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003471{
Peter Maydell50013112015-04-26 16:49:24 +01003472 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003473}
3474
aliguori5e2972f2009-03-28 17:51:36 +00003475/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003476int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003477 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003478{
3479 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003480 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003481 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003482
3483 while (len > 0) {
3484 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003485 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003486 /* if no physical page mapped, return an error */
3487        if (phys_addr == -1) {
3488            return -1;
        }
3489 l = (page + TARGET_PAGE_SIZE) - addr;
3490        if (l > len) {
3491            l = len;
        }
aliguori5e2972f2009-03-28 17:51:36 +00003492 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003493 if (is_write) {
3494 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3495 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003496 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3497 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003498 }
bellard13eb76e2004-01-24 15:23:36 +00003499 len -= l;
3500 buf += l;
3501 addr += l;
3502 }
3503 return 0;
3504}
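
/*
 * Illustrative sketch, not part of the original file: a gdbstub-style
 * caller can read guest virtual memory through cpu_memory_rw_debug() and
 * let it handle the per-page virtual-to-physical translation.
 * example_debug_read is a hypothetical name.
 */
#if 0
static bool example_debug_read(CPUState *cpu, target_ulong vaddr,
                               uint8_t *buf, int len)
{
    /* cpu_memory_rw_debug() returns 0 on success and -1 as soon as a page
     * in the range is unmapped; is_write == 0 selects a read.  */
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0) == 0;
}
#endif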
Paul Brooka68fe892010-03-01 00:08:59 +00003505#endif
bellard13eb76e2004-01-24 15:23:36 +00003506
Blue Swirl8e4a4242013-01-06 18:30:17 +00003507/*
3508 * A helper function for the _utterly broken_ virtio device model to find out if
3509 * it's running on a big endian machine. Don't do this at home kids!
3510 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003511bool target_words_bigendian(void);
3512bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003513{
3514#if defined(TARGET_WORDS_BIGENDIAN)
3515 return true;
3516#else
3517 return false;
3518#endif
3519}
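
/*
 * Illustrative sketch, not part of the original file: a legacy virtio
 * helper of the kind the comment above alludes to could pick the byte-swap
 * direction at run time.  example_virtio_tswap16 is a hypothetical name;
 * the bswap helpers come from qemu/bswap.h.
 */
#if 0
static uint16_t example_virtio_tswap16(uint16_t val)
{
    /* A 16-bit byte swap is symmetric, so one helper per byte order is
     * enough to convert between host and guest endianness.  */
    return target_words_bigendian() ? be16_to_cpu(val) : le16_to_cpu(val);
}
#endif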
3520
Wen Congyang76f35532012-05-07 12:04:18 +08003521#ifndef CONFIG_USER_ONLY
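/*
 * Returns true if the given physical address resolves to neither RAM nor a
 * ROM device, i.e. to MMIO.  The RCU read-side critical section keeps the
 * memory map, and with it the translated MemoryRegion, valid for the
 * duration of the lookup.
 */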
Avi Kivitya8170e52012-10-23 12:30:10 +02003522bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003523{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003524    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003525 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003526 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003527
Paolo Bonzini41063e12015-03-18 14:21:43 +01003528 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003529 mr = address_space_translate(&address_space_memory,
3530 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003531
Paolo Bonzini41063e12015-03-18 14:21:43 +01003532 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3533 rcu_read_unlock();
3534 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003535}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003536
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003537int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003538{
3539 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003540 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003541
Mike Day0dc3f442013-09-05 14:41:35 -04003542 rcu_read_lock();
3543 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003544 ret = func(block->idstr, block->host, block->offset,
3545 block->used_length, opaque);
3546 if (ret) {
3547 break;
3548 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003549 }
Mike Day0dc3f442013-09-05 14:41:35 -04003550 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003551 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003552}
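
/*
 * Illustrative sketch, not part of the original file: a caller-supplied
 * callback matching the arguments qemu_ram_foreach_block() passes above.
 * A non-zero return stops the walk, as the loop shows.  The example names
 * and the limit passed through opaque are hypothetical.
 */
#if 0
static int example_find_big_block(const char *idstr, void *host_addr,
                                  ram_addr_t offset, ram_addr_t used_length,
                                  void *opaque)
{
    ram_addr_t limit = *(ram_addr_t *)opaque;

    /* Non-zero aborts the iteration at this block.  */
    return used_length > limit;
}

static bool example_has_big_block(void)
{
    ram_addr_t limit = 0x40000000;   /* 1 GiB */

    return qemu_ram_foreach_block(example_find_big_block, &limit) != 0;
}
#endif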
Peter Maydellec3f8c92013-06-27 20:53:38 +01003553#endif