/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* RAM is backed by an mmapped file.
 */
#define RAM_FILE (1 << 3)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

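/* Allocate a fresh node from the map's node array and initialize all of its
 * P_L2_SIZE entries: as unassigned leaves when 'leaf' is true, otherwise as
 * empty pointers to the next level.
 */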
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

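/* Populate one level of the radix tree: runs of pages that are aligned to
 * and at least as large as this level's step become leaves pointing at
 * 'leaf'; anything smaller recurses into the next level down.
 */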
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

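/* Walk the radix tree from 'lp', honouring the per-entry 'skip' counts, and
 * return the MemoryRegionSection covering 'addr', or the unassigned section
 * if no valid entry is found.
 */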
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

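/* RAM can be accessed directly through its host pointer, except for writes
 * to read-only regions; ROM devices (romd) are direct for reads only.
 */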
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->cpu_ases) {
        /* We've already registered the listener for our only AS */
        return;
    }

    cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
    cpu->cpu_ases[0].cpu = cpu;
    cpu->cpu_ases[0].as = as;
    cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
    memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

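/* Hook a freshly created CPU into the machine: allocate its cpu_index,
 * insert it into the global CPU list and register its VMState so that it
 * participates in savevm/migration.
 */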
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

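/* Called after the dirty bitmap has been cleared: reset the TLB entries
 * covering the range on every CPU so that the next write is trapped again
 * and re-marks the pages dirty.
 */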
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

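/* Register a section that does not cover a whole target page: create (or
 * reuse) the subpage container for that page and map the section into it.
 */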
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

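/* MemoryListener 'region_add' callback: split the incoming section into an
 * unaligned head, a page-aligned middle and an unaligned tail, registering
 * the aligned part as whole pages and the rest as subpages.
 */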
Avi Kivityac1970f2012-10-03 16:22:53 +02001129static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001130{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001131 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001132 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001133 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001134 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001135
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001136 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1137 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1138 - now.offset_within_address_space;
1139
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001140 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001141 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001142 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001144 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001145 while (int128_ne(remain.size, now.size)) {
1146 remain.size = int128_sub(remain.size, now.size);
1147 remain.offset_within_address_space += int128_get64(now.size);
1148 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001149 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001150 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001151 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001152 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001153 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001154 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001155 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001156 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001157 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001158 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001159 }
1160}
1161
Sheng Yang62a27442010-01-26 19:21:16 +08001162void qemu_flush_coalesced_mmio_buffer(void)
1163{
1164 if (kvm_enabled())
1165 kvm_flush_coalesced_mmio_buffer();
1166}
1167
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001168void qemu_mutex_lock_ramlist(void)
1169{
1170 qemu_mutex_lock(&ram_list.mutex);
1171}
1172
1173void qemu_mutex_unlock_ramlist(void)
1174{
1175 qemu_mutex_unlock(&ram_list.mutex);
1176}
1177
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001178#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001179
1180#include <sys/vfs.h>
1181
1182#define HUGETLBFS_MAGIC 0x958458f6
1183
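/* Return the block size reported by statfs() for @path; on a hugetlbfs mount
 * this is the huge page size.  Returns 0 and sets @errp on failure, and only
 * warns if the path is not on hugetlbfs.
 */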
Hu Taofc7a5802014-09-09 13:28:01 +08001184static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001185{
1186 struct statfs fs;
1187 int ret;
1188
1189 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001190 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001191 } while (ret != 0 && errno == EINTR);
1192
1193 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001194 error_setg_errno(errp, errno, "failed to get page size of file %s",
1195 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001196 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001197 }
1198
1199 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001200 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001201
1202 return fs.f_bsize;
1203}
1204
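/* Allocate backing memory for @block from a (typically hugetlbfs) mount at
 * @path: create an unlinked temporary file there, size it with ftruncate(),
 * map it, and optionally preallocate the pages.  Returns the mapped area or
 * NULL on failure.
 */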
Alex Williamson04b16652010-07-02 11:13:17 -06001205static void *file_ram_alloc(RAMBlock *block,
1206 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001207 const char *path,
1208 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001209{
1210 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001211 char *sanitized_name;
1212 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001213 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001214 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001215 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001216 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001217
Hu Taofc7a5802014-09-09 13:28:01 +08001218 hpagesize = gethugepagesize(path, &local_err);
1219 if (local_err) {
1220 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001221 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001222 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001223 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001224
1225 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001226 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1227 "or larger than huge page size 0x%" PRIx64,
1228 memory, hpagesize);
1229 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001230 }
1231
1232 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001233 error_setg(errp,
1234 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001235 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236 }
1237
Peter Feiner8ca761f2013-03-04 13:54:25 -05001238 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001239 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001240 for (c = sanitized_name; *c != '\0'; c++) {
1241 if (*c == '/')
1242 *c = '_';
1243 }
1244
1245 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1246 sanitized_name);
1247 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001248
1249 fd = mkstemp(filename);
1250 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001251 error_setg_errno(errp, errno,
1252 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001253 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001254 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001255 }
1256 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001257 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001258
Chen Hanxiao9284f312015-07-24 11:12:03 +08001259 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001260
1261 /*
1262 * ftruncate is not supported by hugetlbfs in older
1263 * hosts, so don't bother bailing out on errors.
1264 * If anything goes wrong with it under other filesystems,
1265 * mmap will fail.
1266 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001267 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001268 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001269 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001270
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001271 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001272 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001273 error_setg_errno(errp, errno,
1274 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001275 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001276 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001277 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001278
1279 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001280 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001281 }
1282
Alex Williamson04b16652010-07-02 11:13:17 -06001283 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001284 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001285
1286error:
1287 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001288 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001289 exit(1);
1290 }
1291 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001292}
1293#endif
1294
Mike Day0dc3f442013-09-05 14:41:35 -04001295/* Called with the ramlist lock held. */
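/* Find the smallest gap between existing RAMBlocks that can hold @size bytes
 * (best fit) and return its start offset; aborts if no gap is large enough.
 */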
Alex Williamsond17b5282010-06-25 11:08:38 -06001296static ram_addr_t find_ram_offset(ram_addr_t size)
1297{
Alex Williamson04b16652010-07-02 11:13:17 -06001298 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001299 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001300
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001301 assert(size != 0); /* it would hand out the same offset multiple times */
1302
Mike Day0dc3f442013-09-05 14:41:35 -04001303 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001304 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001305 }
Alex Williamson04b16652010-07-02 11:13:17 -06001306
Mike Day0dc3f442013-09-05 14:41:35 -04001307 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001308 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001309
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001310 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001311
Mike Day0dc3f442013-09-05 14:41:35 -04001312 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001313 if (next_block->offset >= end) {
1314 next = MIN(next, next_block->offset);
1315 }
1316 }
1317 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001318 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001319 mingap = next - end;
1320 }
1321 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001322
1323 if (offset == RAM_ADDR_MAX) {
1324 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1325 (uint64_t)size);
1326 abort();
1327 }
1328
Alex Williamson04b16652010-07-02 11:13:17 -06001329 return offset;
1330}
1331
Juan Quintela652d7ec2012-07-20 10:37:54 +02001332ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001333{
Alex Williamsond17b5282010-06-25 11:08:38 -06001334 RAMBlock *block;
1335 ram_addr_t last = 0;
1336
Mike Day0dc3f442013-09-05 14:41:35 -04001337 rcu_read_lock();
1338 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001339 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001340 }
Mike Day0dc3f442013-09-05 14:41:35 -04001341 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001342 return last;
1343}
1344
Jason Baronddb97f12012-08-02 15:44:16 -04001345static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1346{
1347 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001348
 1349 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001350 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001351 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1352 if (ret) {
1353 perror("qemu_madvise");
1354 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1355 "but dump_guest_core=off specified\n");
1356 }
1357 }
1358}
1359
Mike Day0dc3f442013-09-05 14:41:35 -04001360/* Called within an RCU critical section, or while the ramlist lock
1361 * is held.
1362 */
Hu Tao20cfe882014-04-02 15:13:26 +08001363static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001364{
Hu Tao20cfe882014-04-02 15:13:26 +08001365 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001366
Mike Day0dc3f442013-09-05 14:41:35 -04001367 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001368 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001369 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001370 }
1371 }
Hu Tao20cfe882014-04-02 15:13:26 +08001372
1373 return NULL;
1374}
1375
Mike Dayae3a7042013-09-05 14:41:35 -04001376/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001377void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1378{
Mike Dayae3a7042013-09-05 14:41:35 -04001379 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001380
Mike Day0dc3f442013-09-05 14:41:35 -04001381 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001382 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001383 assert(new_block);
1384 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001385
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001386 if (dev) {
1387 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001388 if (id) {
1389 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001390 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001391 }
1392 }
1393 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1394
Mike Day0dc3f442013-09-05 14:41:35 -04001395 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001396 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001397 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1398 new_block->idstr);
1399 abort();
1400 }
1401 }
Mike Day0dc3f442013-09-05 14:41:35 -04001402 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001403}
1404
Mike Dayae3a7042013-09-05 14:41:35 -04001405/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001406void qemu_ram_unset_idstr(ram_addr_t addr)
1407{
Mike Dayae3a7042013-09-05 14:41:35 -04001408 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001409
Mike Dayae3a7042013-09-05 14:41:35 -04001410 /* FIXME: arch_init.c assumes that this is not called throughout
1411 * migration. Ignore the problem since hot-unplug during migration
1412 * does not work anyway.
1413 */
1414
Mike Day0dc3f442013-09-05 14:41:35 -04001415 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001416 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001417 if (block) {
1418 memset(block->idstr, 0, sizeof(block->idstr));
1419 }
Mike Day0dc3f442013-09-05 14:41:35 -04001420 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001421}
1422
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001423static int memory_try_enable_merging(void *addr, size_t len)
1424{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001425 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001426 /* disabled by the user */
1427 return 0;
1428 }
1429
1430 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1431}
1432
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001433/* Only legal before the guest might have detected the memory size: e.g. on
1434 * incoming migration, or right after reset.
1435 *
 1436 * As the memory core doesn't know how memory is accessed, it is up to the
 1437 * resize callback to update device state and/or add assertions to detect
1438 * misuse, if necessary.
1439 */
1440int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1441{
1442 RAMBlock *block = find_ram_block(base);
1443
1444 assert(block);
1445
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001446 newsize = TARGET_PAGE_ALIGN(newsize);
1447
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001448 if (block->used_length == newsize) {
1449 return 0;
1450 }
1451
1452 if (!(block->flags & RAM_RESIZEABLE)) {
1453 error_setg_errno(errp, EINVAL,
1454 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1455 " in != 0x" RAM_ADDR_FMT, block->idstr,
1456 newsize, block->used_length);
1457 return -EINVAL;
1458 }
1459
1460 if (block->max_length < newsize) {
1461 error_setg_errno(errp, EINVAL,
1462 "Length too large: %s: 0x" RAM_ADDR_FMT
1463 " > 0x" RAM_ADDR_FMT, block->idstr,
1464 newsize, block->max_length);
1465 return -EINVAL;
1466 }
1467
1468 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1469 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001470 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1471 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001472 memory_region_set_size(block->mr, newsize);
1473 if (block->resized) {
1474 block->resized(block->idstr, newsize, block->host);
1475 }
1476 return 0;
1477}
1478
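/* Insert @new_block into the RAM list: assign it an offset, allocate its host
 * memory unless the caller provided it (or Xen owns the allocation), keep the
 * list sorted from biggest to smallest block, and grow the dirty-memory
 * bitmaps to cover the new range.
 */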
Hu Taoef701d72014-09-09 13:27:54 +08001479static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001480{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001481 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001482 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001483 ram_addr_t old_ram_size, new_ram_size;
1484
1485 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001486
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001487 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001488 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001489
1490 if (!new_block->host) {
1491 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001492 xen_ram_alloc(new_block->offset, new_block->max_length,
1493 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001494 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001495 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001496 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001497 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001498 error_setg_errno(errp, errno,
1499 "cannot set up guest memory '%s'",
1500 memory_region_name(new_block->mr));
1501 qemu_mutex_unlock_ramlist();
1502 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001503 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001504 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001505 }
1506 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001507
Li Zhijiandd631692015-07-02 20:18:06 +08001508 new_ram_size = MAX(old_ram_size,
1509 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1510 if (new_ram_size > old_ram_size) {
1511 migration_bitmap_extend(old_ram_size, new_ram_size);
1512 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001513 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1514 * QLIST (which has an RCU-friendly variant) does not have insertion at
1515 * tail, so save the last element in last_block.
1516 */
Mike Day0dc3f442013-09-05 14:41:35 -04001517 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001518 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001519 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001520 break;
1521 }
1522 }
1523 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001524 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001525 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001526 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001527 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001528 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001529 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001530 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001531
Mike Day0dc3f442013-09-05 14:41:35 -04001532 /* Write list before version */
1533 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001534 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001535 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001536
Juan Quintela2152f5c2013-10-08 13:52:02 +02001537 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1538
1539 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001540 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001541
1542 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001543 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1544 ram_list.dirty_memory[i] =
1545 bitmap_zero_extend(ram_list.dirty_memory[i],
1546 old_ram_size, new_ram_size);
1547 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001548 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001549 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001550 new_block->used_length,
1551 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001552
Paolo Bonzinia904c912015-01-21 16:18:35 +01001553 if (new_block->host) {
1554 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1555 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1556 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1557 if (kvm_enabled()) {
1558 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1559 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001560 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001561
1562 return new_block->offset;
1563}
1564
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001565#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001566ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001567 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001568 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001569{
1570 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001571 ram_addr_t addr;
1572 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001573
1574 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001575 error_setg(errp, "-mem-path not supported with Xen");
1576 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577 }
1578
1579 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1580 /*
1581 * file_ram_alloc() needs to allocate just like
1582 * phys_mem_alloc, but we haven't bothered to provide
1583 * a hook there.
1584 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001585 error_setg(errp,
1586 "-mem-path not supported with this accelerator");
1587 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001588 }
1589
1590 size = TARGET_PAGE_ALIGN(size);
1591 new_block = g_malloc0(sizeof(*new_block));
1592 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001593 new_block->used_length = size;
1594 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001595 new_block->flags = share ? RAM_SHARED : 0;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001596 new_block->flags |= RAM_FILE;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001597 new_block->host = file_ram_alloc(new_block, size,
1598 mem_path, errp);
1599 if (!new_block->host) {
1600 g_free(new_block);
1601 return -1;
1602 }
1603
Hu Taoef701d72014-09-09 13:27:54 +08001604 addr = ram_block_add(new_block, &local_err);
1605 if (local_err) {
1606 g_free(new_block);
1607 error_propagate(errp, local_err);
1608 return -1;
1609 }
1610 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001611}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001612#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001613
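/* Common helper behind qemu_ram_alloc(), qemu_ram_alloc_from_ptr() and
 * qemu_ram_alloc_resizeable(): build a RAMBlock with the requested flags and
 * hand it to ram_block_add().
 */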
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001614static
1615ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1616 void (*resized)(const char*,
1617 uint64_t length,
1618 void *host),
1619 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001620 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001621{
1622 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001623 ram_addr_t addr;
1624 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001625
1626 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001627 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001628 new_block = g_malloc0(sizeof(*new_block));
1629 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001630 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001631 new_block->used_length = size;
1632 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001633 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001634 new_block->fd = -1;
1635 new_block->host = host;
1636 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001637 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001638 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001639 if (resizeable) {
1640 new_block->flags |= RAM_RESIZEABLE;
1641 }
Hu Taoef701d72014-09-09 13:27:54 +08001642 addr = ram_block_add(new_block, &local_err);
1643 if (local_err) {
1644 g_free(new_block);
1645 error_propagate(errp, local_err);
1646 return -1;
1647 }
1648 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001649}
1650
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001651ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1652 MemoryRegion *mr, Error **errp)
1653{
1654 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1655}
1656
Hu Taoef701d72014-09-09 13:27:54 +08001657ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001658{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001659 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1660}
1661
1662ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1663 void (*resized)(const char*,
1664 uint64_t length,
1665 void *host),
1666 MemoryRegion *mr, Error **errp)
1667{
1668 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001669}
bellarde9a1ab12007-02-08 23:08:38 +00001670
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001671void qemu_ram_free_from_ptr(ram_addr_t addr)
1672{
1673 RAMBlock *block;
1674
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001675 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001676 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001677 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001678 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001679 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001680 /* Write list before version */
1681 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001682 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001683 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001684 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001685 }
1686 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001687 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001688}
1689
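/* Deferred (call_rcu) destructor for a RAMBlock: release its backing memory
 * in whatever way it was allocated (preallocated, Xen-mapped, file-backed or
 * anonymous) and free the block itself.
 */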
Paolo Bonzini43771532013-09-09 17:58:40 +02001690static void reclaim_ramblock(RAMBlock *block)
1691{
1692 if (block->flags & RAM_PREALLOC) {
1693 ;
1694 } else if (xen_enabled()) {
1695 xen_invalidate_map_cache_entry(block->host);
1696#ifndef _WIN32
1697 } else if (block->fd >= 0) {
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001698 if (block->flags & RAM_FILE) {
1699 qemu_ram_munmap(block->host, block->max_length);
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001700 } else {
1701 munmap(block->host, block->max_length);
1702 }
Paolo Bonzini43771532013-09-09 17:58:40 +02001703 close(block->fd);
1704#endif
1705 } else {
1706 qemu_anon_ram_free(block->host, block->max_length);
1707 }
1708 g_free(block);
1709}
1710
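/* Unlink the RAMBlock at @addr from the RAM list and reclaim it only after a
 * grace period, so readers still walking the list under RCU remain safe.
 */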
Anthony Liguoric227f092009-10-01 16:12:16 -05001711void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001712{
Alex Williamson04b16652010-07-02 11:13:17 -06001713 RAMBlock *block;
1714
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001715 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001716 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001717 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001718 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001719 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001720 /* Write list before version */
1721 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001722 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001723 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001724 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001725 }
1726 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001727 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001728}
1729
Huang Yingcd19cfa2011-03-02 08:56:19 +01001730#ifndef _WIN32
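/* Re-establish the host mapping for a range of guest RAM in place (MAP_FIXED),
 * matching the way the block was originally allocated; used by callers that
 * must discard and recreate a mapping, for instance after a hardware memory
 * error.
 */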
1731void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1732{
1733 RAMBlock *block;
1734 ram_addr_t offset;
1735 int flags;
1736 void *area, *vaddr;
1737
Mike Day0dc3f442013-09-05 14:41:35 -04001738 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001739 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001740 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001741 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001742 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001743 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001744 } else if (xen_enabled()) {
1745 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001746 } else {
1747 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001748 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001749 flags |= (block->flags & RAM_SHARED ?
1750 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001751 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1752 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001753 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001754 /*
1755 * Remap needs to match alloc. Accelerators that
1756 * set phys_mem_alloc never remap. If they did,
1757 * we'd need a remap hook here.
1758 */
1759 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1760
Huang Yingcd19cfa2011-03-02 08:56:19 +01001761 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1762 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1763 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001764 }
1765 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001766 fprintf(stderr, "Could not remap addr: "
1767 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001768 length, addr);
1769 exit(1);
1770 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001771 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001772 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001773 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001774 }
1775 }
1776}
1777#endif /* !_WIN32 */
1778
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001779int qemu_get_ram_fd(ram_addr_t addr)
1780{
Mike Dayae3a7042013-09-05 14:41:35 -04001781 RAMBlock *block;
1782 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001783
Mike Day0dc3f442013-09-05 14:41:35 -04001784 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001785 block = qemu_get_ram_block(addr);
1786 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001787 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001788 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001789}
1790
Damjan Marion3fd74b82014-06-26 23:01:32 +02001791void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1792{
Mike Dayae3a7042013-09-05 14:41:35 -04001793 RAMBlock *block;
1794 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001795
Mike Day0dc3f442013-09-05 14:41:35 -04001796 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001797 block = qemu_get_ram_block(addr);
1798 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001799 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001800 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001801}
1802
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001803/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001804 * This should not be used for general purpose DMA. Use address_space_map
1805 * or address_space_rw instead. For local memory (e.g. video ram) that the
1806 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001807 *
1808 * By the time this function returns, the returned pointer is not protected
1809 * by RCU anymore. If the caller is not within an RCU critical section and
1810 * does not hold the iothread lock, it must have other means of protecting the
1811 * pointer, such as a reference to the region that includes the incoming
1812 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001813 */
1814void *qemu_get_ram_ptr(ram_addr_t addr)
1815{
Mike Dayae3a7042013-09-05 14:41:35 -04001816 RAMBlock *block;
1817 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001818
Mike Day0dc3f442013-09-05 14:41:35 -04001819 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001820 block = qemu_get_ram_block(addr);
1821
1822 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001823 /* We need to check if the requested address is in the RAM
1824 * because we don't want to map the entire memory in QEMU.
1825 * In that case just map until the end of the page.
1826 */
1827 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001828 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001829 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001830 }
Mike Dayae3a7042013-09-05 14:41:35 -04001831
1832 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001833 }
Mike Dayae3a7042013-09-05 14:41:35 -04001834 ptr = ramblock_ptr(block, addr - block->offset);
1835
Mike Day0dc3f442013-09-05 14:41:35 -04001836unlock:
1837 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001838 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001839}
1840
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001841/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001842 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001843 *
1844 * By the time this function returns, the returned pointer is not protected
1845 * by RCU anymore. If the caller is not within an RCU critical section and
1846 * does not hold the iothread lock, it must have other means of protecting the
1847 * pointer, such as a reference to the region that includes the incoming
1848 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001849 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001850static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001851{
Mike Dayae3a7042013-09-05 14:41:35 -04001852 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001853 if (*size == 0) {
1854 return NULL;
1855 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001856 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001857 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001858 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001859 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001860 rcu_read_lock();
1861 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001862 if (addr - block->offset < block->max_length) {
1863 if (addr - block->offset + *size > block->max_length)
1864 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001865 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001866 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001867 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001868 }
1869 }
1870
1871 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1872 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001873 }
1874}
1875
Paolo Bonzini7443b432013-06-03 12:44:02 +02001876/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001877 * (typically a TLB entry) back to a ram offset.
1878 *
1879 * By the time this function returns, the returned pointer is not protected
1880 * by RCU anymore. If the caller is not within an RCU critical section and
1881 * does not hold the iothread lock, it must have other means of protecting the
1882 * pointer, such as a reference to the region that includes the incoming
1883 * ram_addr_t.
1884 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001885MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001886{
pbrook94a6b542009-04-11 17:15:54 +00001887 RAMBlock *block;
1888 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001889 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001890
Jan Kiszka868bb332011-06-21 22:59:09 +02001891 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001892 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001893 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001894 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001895 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001896 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001897 }
1898
Mike Day0dc3f442013-09-05 14:41:35 -04001899 rcu_read_lock();
1900 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001901 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001902 goto found;
1903 }
1904
Mike Day0dc3f442013-09-05 14:41:35 -04001905 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001906 /* This case happens when the block is not mapped. */
1907 if (block->host == NULL) {
1908 continue;
1909 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001910 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001911 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001912 }
pbrook94a6b542009-04-11 17:15:54 +00001913 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001914
Mike Day0dc3f442013-09-05 14:41:35 -04001915 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001916 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001917
1918found:
1919 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001920 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001921 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001922 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001923}
Alex Williamsonf471a172010-06-11 11:11:42 -06001924
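/* Write handler for the "notdirty" memory region that is installed for RAM
 * pages which are not yet dirty for all clients: invalidate any translated
 * code derived from the page if needed, perform the write, and mark the page
 * dirty so that later writes can take the fast path again.
 */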
Avi Kivitya8170e52012-10-23 12:30:10 +02001925static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001926 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001927{
Juan Quintela52159192013-10-08 12:44:04 +02001928 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001929 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001930 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001931 switch (size) {
1932 case 1:
1933 stb_p(qemu_get_ram_ptr(ram_addr), val);
1934 break;
1935 case 2:
1936 stw_p(qemu_get_ram_ptr(ram_addr), val);
1937 break;
1938 case 4:
1939 stl_p(qemu_get_ram_ptr(ram_addr), val);
1940 break;
1941 default:
1942 abort();
1943 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001944 /* Set both VGA and migration bits for simplicity and to remove
1945 * the notdirty callback faster.
1946 */
1947 cpu_physical_memory_set_dirty_range(ram_addr, size,
1948 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001949 /* we remove the notdirty callback only if the code has been
1950 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001951 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001952 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001953 }
bellard1ccde1c2004-02-06 19:46:14 +00001954}
1955
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001956static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1957 unsigned size, bool is_write)
1958{
1959 return is_write;
1960}
1961
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001962static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001963 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001964 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001965 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001966};
1967
pbrook0f459d12008-06-09 00:20:13 +00001968/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001969static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001970{
Andreas Färber93afead2013-08-26 03:41:01 +02001971 CPUState *cpu = current_cpu;
1972 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001973 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001974 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001975 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001976 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001977
Andreas Färberff4700b2013-08-26 18:23:18 +02001978 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001979 /* We re-entered the check after replacing the TB. Now raise
 1980 * the debug interrupt so that it will trigger after the
1981 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001982 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001983 return;
1984 }
Andreas Färber93afead2013-08-26 03:41:01 +02001985 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001986 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001987 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1988 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001989 if (flags == BP_MEM_READ) {
1990 wp->flags |= BP_WATCHPOINT_HIT_READ;
1991 } else {
1992 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1993 }
1994 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001995 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001996 if (!cpu->watchpoint_hit) {
1997 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001998 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001999 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002000 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002001 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002002 } else {
2003 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002004 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002005 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002006 }
aliguori06d55cc2008-11-18 20:24:06 +00002007 }
aliguori6e140f22008-11-18 20:37:55 +00002008 } else {
2009 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002010 }
2011 }
2012}
2013
pbrook6658ffb2007-03-16 23:58:11 +00002014/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2015 so these check for a hit then pass through to the normal out-of-line
2016 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002017static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2018 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002019{
Peter Maydell66b9b432015-04-26 16:49:24 +01002020 MemTxResult res;
2021 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002022
Peter Maydell66b9b432015-04-26 16:49:24 +01002023 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002024 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002025 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002026 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002027 break;
2028 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002029 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002030 break;
2031 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002032 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002033 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002034 default: abort();
2035 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002036 *pdata = data;
2037 return res;
2038}
2039
2040static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2041 uint64_t val, unsigned size,
2042 MemTxAttrs attrs)
2043{
2044 MemTxResult res;
2045
2046 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2047 switch (size) {
2048 case 1:
2049 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2050 break;
2051 case 2:
2052 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2053 break;
2054 case 4:
2055 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2056 break;
2057 default: abort();
2058 }
2059 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002060}
2061
Avi Kivity1ec9b902012-01-02 12:47:48 +02002062static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002063 .read_with_attrs = watch_mem_read,
2064 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002065 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002066};
pbrook6658ffb2007-03-16 23:58:11 +00002067
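/* Sub-page accessors: a subpage_t covers a single target page that contains
 * more than one memory region, and forwards each access back into the owning
 * address space at (base + addr) so the normal dispatch can resolve it.
 */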
Peter Maydellf25a49e2015-04-26 16:49:24 +01002068static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2069 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002070{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002071 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002072 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002073 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002074
blueswir1db7b5422007-05-26 17:36:03 +00002075#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002076 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002077 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002078#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002079 res = address_space_read(subpage->as, addr + subpage->base,
2080 attrs, buf, len);
2081 if (res) {
2082 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002083 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002084 switch (len) {
2085 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002086 *data = ldub_p(buf);
2087 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002088 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002089 *data = lduw_p(buf);
2090 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002091 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002092 *data = ldl_p(buf);
2093 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002094 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002095 *data = ldq_p(buf);
2096 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002097 default:
2098 abort();
2099 }
blueswir1db7b5422007-05-26 17:36:03 +00002100}
2101
Peter Maydellf25a49e2015-04-26 16:49:24 +01002102static MemTxResult subpage_write(void *opaque, hwaddr addr,
2103 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002104{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002105 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002106 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002107
blueswir1db7b5422007-05-26 17:36:03 +00002108#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002109 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002110 " value %"PRIx64"\n",
2111 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002112#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002113 switch (len) {
2114 case 1:
2115 stb_p(buf, value);
2116 break;
2117 case 2:
2118 stw_p(buf, value);
2119 break;
2120 case 4:
2121 stl_p(buf, value);
2122 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002123 case 8:
2124 stq_p(buf, value);
2125 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002126 default:
2127 abort();
2128 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002129 return address_space_write(subpage->as, addr + subpage->base,
2130 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002131}
2132
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002133static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002134 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002135{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002136 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002137#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002138 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002139 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002140#endif
2141
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002142 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002143 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002144}
2145
Avi Kivity70c68e42012-01-02 12:32:48 +02002146static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002147 .read_with_attrs = subpage_read,
2148 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002149 .impl.min_access_size = 1,
2150 .impl.max_access_size = 8,
2151 .valid.min_access_size = 1,
2152 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002153 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002154 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002155};
2156
Anthony Liguoric227f092009-10-01 16:12:16 -05002157static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002158 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002159{
2160 int idx, eidx;
2161
2162 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2163 return -1;
2164 idx = SUBPAGE_IDX(start);
2165 eidx = SUBPAGE_IDX(end);
2166#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002167 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2168 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002169#endif
blueswir1db7b5422007-05-26 17:36:03 +00002170 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002171 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002172 }
2173
2174 return 0;
2175}
2176
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002177static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002178{
Anthony Liguoric227f092009-10-01 16:12:16 -05002179 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002180
Anthony Liguori7267c092011-08-20 22:09:37 -05002181 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002182
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002183 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002184 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002185 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002186 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002187 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002188#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002189 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2190 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002191#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002192 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002193
2194 return mmio;
2195}
2196
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002197static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2198 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002199{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002200 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002201 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002202 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002203 .mr = mr,
2204 .offset_within_address_space = 0,
2205 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002206 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002207 };
2208
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002209 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002210}
2211
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002212MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002213{
Peter Maydell32857f42015-10-01 15:29:50 +01002214 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2215 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002216 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002217
2218 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002219}
2220
Avi Kivitye9179ce2009-06-14 11:38:52 +03002221static void io_mem_init(void)
2222{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002223 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002224 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002225 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002226 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002227 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002228 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002229 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002230}
2231
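/* Dispatch rebuild is two-phase: mem_begin() starts a fresh
 * AddressSpaceDispatch in as->next_dispatch with the four fixed sections
 * (unassigned, notdirty, rom, watch), mem_add() populates it, and
 * mem_commit() compacts the page tree and publishes it with RCU.
 */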
Avi Kivityac1970f2012-10-03 16:22:53 +02002232static void mem_begin(MemoryListener *listener)
2233{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002234 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002235 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2236 uint16_t n;
2237
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002238 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002239 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002240 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002241 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002242 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002243 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002244 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002245 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002246
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002247 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002248 d->as = as;
2249 as->next_dispatch = d;
2250}
2251
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002252static void address_space_dispatch_free(AddressSpaceDispatch *d)
2253{
2254 phys_sections_free(&d->map);
2255 g_free(d);
2256}
2257
Paolo Bonzini00752702013-05-29 12:13:54 +02002258static void mem_commit(MemoryListener *listener)
2259{
2260 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002261 AddressSpaceDispatch *cur = as->dispatch;
2262 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002263
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002264 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002265
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002266 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002267 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002268 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002269 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002270}
2271
Avi Kivity1d711482012-10-02 18:54:45 +02002272static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002273{
Peter Maydell32857f42015-10-01 15:29:50 +01002274 CPUAddressSpace *cpuas;
2275 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002276
 2277    /* Since each CPU stores RAM addresses in its TLB cache, we must
 2278       reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002279 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2280 cpu_reloading_memory_map();
2281 /* The CPU and TLB are protected by the iothread lock.
2282 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2283 * may have split the RCU critical section.
2284 */
2285 d = atomic_rcu_read(&cpuas->as->dispatch);
2286 cpuas->memory_dispatch = d;
2287 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002288}
2289
Avi Kivityac1970f2012-10-03 16:22:53 +02002290void address_space_init_dispatch(AddressSpace *as)
2291{
Paolo Bonzini00752702013-05-29 12:13:54 +02002292 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002293 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002294 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002295 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002296 .region_add = mem_add,
2297 .region_nop = mem_add,
2298 .priority = 0,
2299 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002300 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002301}
2302
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002303void address_space_unregister(AddressSpace *as)
2304{
2305 memory_listener_unregister(&as->dispatch_listener);
2306}
2307
Avi Kivity83f3c252012-10-07 12:59:55 +02002308void address_space_destroy_dispatch(AddressSpace *as)
2309{
2310 AddressSpaceDispatch *d = as->dispatch;
2311
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002312 atomic_rcu_set(&as->dispatch, NULL);
2313 if (d) {
2314 call_rcu(d, address_space_dispatch_free, rcu);
2315 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002316}
2317
Avi Kivity62152b82011-07-26 14:26:14 +03002318static void memory_map_init(void)
2319{
Anthony Liguori7267c092011-08-20 22:09:37 -05002320 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002321
Paolo Bonzini57271d62013-11-07 17:14:37 +01002322 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002323 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002324
Anthony Liguori7267c092011-08-20 22:09:37 -05002325 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002326 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2327 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002328 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002329}
2330
2331MemoryRegion *get_system_memory(void)
2332{
2333 return system_memory;
2334}
2335
Avi Kivity309cb472011-08-08 16:09:03 +03002336MemoryRegion *get_system_io(void)
2337{
2338 return system_io;
2339}
2340
pbrooke2eef172008-06-08 01:09:01 +00002341#endif /* !defined(CONFIG_USER_ONLY) */
2342
bellard13eb76e2004-01-24 15:23:36 +00002343/* physical memory access (slow version, mainly for debug) */
2344#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002345int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002346 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002347{
2348 int l, flags;
2349 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002350    void *p;
bellard13eb76e2004-01-24 15:23:36 +00002351
2352 while (len > 0) {
2353 page = addr & TARGET_PAGE_MASK;
2354 l = (page + TARGET_PAGE_SIZE) - addr;
2355 if (l > len)
2356 l = len;
2357 flags = page_get_flags(page);
2358 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002359 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002360 if (is_write) {
2361 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002362 return -1;
bellard579a97f2007-11-11 14:26:47 +00002363 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002364 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002365 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002366 memcpy(p, buf, l);
2367 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002368 } else {
2369 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002370 return -1;
bellard579a97f2007-11-11 14:26:47 +00002371 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002372 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002373 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002374 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002375 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002376 }
2377 len -= l;
2378 buf += l;
2379 addr += l;
2380 }
Paul Brooka68fe892010-03-01 00:08:59 +00002381 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002382}
bellard8df1cd02005-01-28 22:37:22 +00002383
bellard13eb76e2004-01-24 15:23:36 +00002384#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002385
Paolo Bonzini845b6212015-03-23 11:45:53 +01002386static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002387 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002388{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002389 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2390 /* No early return if dirty_log_mask is or becomes 0, because
2391 * cpu_physical_memory_set_dirty_range will still call
2392 * xen_modified_memory.
2393 */
2394 if (dirty_log_mask) {
2395 dirty_log_mask =
2396 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002397 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002398 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2399 tb_invalidate_phys_range(addr, addr + length);
2400 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2401 }
2402 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002403}
2404
Richard Henderson23326162013-07-08 14:55:59 -07002405static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002406{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002407 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002408
2409 /* Regions are assumed to support 1-4 byte accesses unless
2410 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002411 if (access_size_max == 0) {
2412 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002413 }
Richard Henderson23326162013-07-08 14:55:59 -07002414
2415 /* Bound the maximum access by the alignment of the address. */
2416 if (!mr->ops->impl.unaligned) {
2417 unsigned align_size_max = addr & -addr;
2418 if (align_size_max != 0 && align_size_max < access_size_max) {
2419 access_size_max = align_size_max;
2420 }
2421 }
2422
2423 /* Don't attempt accesses larger than the maximum. */
2424 if (l > access_size_max) {
2425 l = access_size_max;
2426 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002427 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002428
2429 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002430}
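/*
 * Example sketch (illustrative only): how memory_access_size() bounds a
 * request.  Assume a hypothetical MMIO region with
 * ops->valid.max_access_size == 4 and no unaligned access support.
 *
 *     l = memory_access_size(mr, 6, 0x1002);
 *     // max_access_size caps l at 4, the address alignment (0x1002) caps
 *     // it at 2, and pow2floor() keeps it a power of two, so l == 2.
 *
 * Callers such as address_space_rw() then loop, issuing 2-byte accesses
 * until the requested length has been consumed.
 */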
2431
Jan Kiszka4840f102015-06-18 18:47:22 +02002432static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002433{
Jan Kiszka4840f102015-06-18 18:47:22 +02002434 bool unlocked = !qemu_mutex_iothread_locked();
2435 bool release_lock = false;
2436
2437 if (unlocked && mr->global_locking) {
2438 qemu_mutex_lock_iothread();
2439 unlocked = false;
2440 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002441 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002442 if (mr->flush_coalesced_mmio) {
2443 if (unlocked) {
2444 qemu_mutex_lock_iothread();
2445 }
2446 qemu_flush_coalesced_mmio_buffer();
2447 if (unlocked) {
2448 qemu_mutex_unlock_iothread();
2449 }
2450 }
2451
2452 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002453}
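/*
 * Example sketch (illustrative only): the calling pattern expected around
 * prepare_mmio_access(), mirroring what address_space_rw() below does.
 *
 *     bool release_lock = false;
 *     ...
 *     if (!memory_access_is_direct(mr, is_write)) {
 *         release_lock |= prepare_mmio_access(mr);
 *         // dispatch the MMIO access while holding the required lock
 *     }
 *     ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */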
2454
Peter Maydell5c9eb022015-04-26 16:49:24 +01002455MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2456 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002457{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002458 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002459 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002460 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002461 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002462 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002463 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002464 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002465
Paolo Bonzini41063e12015-03-18 14:21:43 +01002466 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002467 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002468 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002469 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002470
bellard13eb76e2004-01-24 15:23:36 +00002471 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002472 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002473 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002474 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002475 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002476 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002477 switch (l) {
2478 case 8:
2479 /* 64 bit write access */
2480 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002481 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2482 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002483 break;
2484 case 4:
bellard1c213d12005-09-03 10:49:04 +00002485 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002486 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002487 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2488 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002489 break;
2490 case 2:
bellard1c213d12005-09-03 10:49:04 +00002491 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002492 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002493 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2494 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002495 break;
2496 case 1:
bellard1c213d12005-09-03 10:49:04 +00002497 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002498 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002499 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2500 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002501 break;
2502 default:
2503 abort();
bellard13eb76e2004-01-24 15:23:36 +00002504 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002505 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002506 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002507 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002508 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002509 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002510 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002511 }
2512 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002513 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002514 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002515 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002516 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002517 switch (l) {
2518 case 8:
2519 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002520 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2521 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002522 stq_p(buf, val);
2523 break;
2524 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002525 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002526 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2527 attrs);
bellardc27004e2005-01-03 23:35:10 +00002528 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002529 break;
2530 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002531 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002532 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2533 attrs);
bellardc27004e2005-01-03 23:35:10 +00002534 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002535 break;
2536 case 1:
bellard1c213d12005-09-03 10:49:04 +00002537 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002538 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2539 attrs);
bellardc27004e2005-01-03 23:35:10 +00002540 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002541 break;
2542 default:
2543 abort();
bellard13eb76e2004-01-24 15:23:36 +00002544 }
2545 } else {
2546 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002547 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002548 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002549 }
2550 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002551
2552 if (release_lock) {
2553 qemu_mutex_unlock_iothread();
2554 release_lock = false;
2555 }
2556
bellard13eb76e2004-01-24 15:23:36 +00002557 len -= l;
2558 buf += l;
2559 addr += l;
2560 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002561 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002562
Peter Maydell3b643492015-04-26 16:49:23 +01002563 return result;
bellard13eb76e2004-01-24 15:23:36 +00002564}
bellard8df1cd02005-01-28 22:37:22 +00002565
Peter Maydell5c9eb022015-04-26 16:49:24 +01002566MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2567 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002568{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002569 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002570}
2571
Peter Maydell5c9eb022015-04-26 16:49:24 +01002572MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2573 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002574{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002575 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002576}
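/*
 * Example sketch (illustrative only; the address and payload are
 * hypothetical): a device model issuing a DMA-style write and read
 * through an address space and checking the transaction result.
 *
 *     uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     MemTxResult res;
 *
 *     res = address_space_write(&address_space_memory, 0x1000,
 *                               MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
 *     res |= address_space_read(&address_space_memory, 0x1000,
 *                               MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
 *     if (res != MEMTX_OK) {
 *         // at least one access hit an unassigned or faulting region
 *     }
 */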
2577
2578
Avi Kivitya8170e52012-10-23 12:30:10 +02002579void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002580 int len, int is_write)
2581{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002582 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2583 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002584}
2585
Alexander Graf582b55a2013-12-11 14:17:44 +01002586enum write_rom_type {
2587 WRITE_DATA,
2588 FLUSH_CACHE,
2589};
2590
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002591static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002592 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002593{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002594 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002595 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002596 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002597 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002598
Paolo Bonzini41063e12015-03-18 14:21:43 +01002599 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002600 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002601 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002602 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002603
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002604 if (!(memory_region_is_ram(mr) ||
2605 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002606 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002607 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002608 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002609 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002610 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002611 switch (type) {
2612 case WRITE_DATA:
2613 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002614 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002615 break;
2616 case FLUSH_CACHE:
2617 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2618 break;
2619 }
bellardd0ecd2a2006-04-23 17:14:48 +00002620 }
2621 len -= l;
2622 buf += l;
2623 addr += l;
2624 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002625 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002626}
2627
Alexander Graf582b55a2013-12-11 14:17:44 +01002628/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002629void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002630 const uint8_t *buf, int len)
2631{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002632 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002633}
2634
2635void cpu_flush_icache_range(hwaddr start, int len)
2636{
2637 /*
2638 * This function should do the same thing as an icache flush that was
2639 * triggered from within the guest. For TCG we are always cache coherent,
2640 * so there is no need to flush anything. For KVM / Xen we need to flush
2641 * the host's instruction cache at least.
2642 */
2643 if (tcg_enabled()) {
2644 return;
2645 }
2646
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002647 cpu_physical_memory_write_rom_internal(&address_space_memory,
2648 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002649}
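/*
 * Example sketch (illustrative only; the address and buffer are
 * hypothetical): a firmware loader copying a blob into ROM and then
 * making the host instruction cache coherent before the guest runs it.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
 *                                   blob, blob_len);
 *     cpu_flush_icache_range(0xfffc0000, blob_len);
 */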
2650
aliguori6d16c2f2009-01-22 16:59:11 +00002651typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002652 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002653 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002654 hwaddr addr;
2655 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002656 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002657} BounceBuffer;
2658
2659static BounceBuffer bounce;
2660
aliguoriba223c22009-01-22 16:59:16 +00002661typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002662 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002663 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002664} MapClient;
2665
Fam Zheng38e047b2015-03-16 17:03:35 +08002666QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002667static QLIST_HEAD(map_client_list, MapClient) map_client_list
2668 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002669
Fam Zhenge95205e2015-03-16 17:03:37 +08002670static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002671{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002672 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002673 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002674}
2675
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002676static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002677{
2678 MapClient *client;
2679
Blue Swirl72cf2d42009-09-12 07:36:22 +00002680 while (!QLIST_EMPTY(&map_client_list)) {
2681 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002682 qemu_bh_schedule(client->bh);
2683 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002684 }
2685}
2686
Fam Zhenge95205e2015-03-16 17:03:37 +08002687void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002688{
2689 MapClient *client = g_malloc(sizeof(*client));
2690
Fam Zheng38e047b2015-03-16 17:03:35 +08002691 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002692 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002693 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002694 if (!atomic_read(&bounce.in_use)) {
2695 cpu_notify_map_clients_locked();
2696 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002697 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002698}
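/*
 * Example sketch (illustrative only; retry_dma_cb, MyDeviceState and dev
 * are hypothetical names): how a device model can retry a failed
 * address_space_map() once the single bounce buffer becomes free again.
 * Note that the map client is unregistered automatically when its bottom
 * half is scheduled.
 *
 *     static void retry_dma_cb(void *opaque)
 *     {
 *         MyDeviceState *dev = opaque;
 *         // the map client has already been removed from the list here;
 *         // simply call address_space_map() again and continue the DMA
 *     }
 *
 *     dev->map_bh = qemu_bh_new(retry_dma_cb, dev);
 *     cpu_register_map_client(dev->map_bh);
 */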
2699
Fam Zheng38e047b2015-03-16 17:03:35 +08002700void cpu_exec_init_all(void)
2701{
2702 qemu_mutex_init(&ram_list.mutex);
2703 memory_map_init();
2704 io_mem_init();
2705 qemu_mutex_init(&map_client_list_lock);
2706}
2707
Fam Zhenge95205e2015-03-16 17:03:37 +08002708void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002709{
Fam Zhenge95205e2015-03-16 17:03:37 +08002710 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002711
Fam Zhenge95205e2015-03-16 17:03:37 +08002712 qemu_mutex_lock(&map_client_list_lock);
2713 QLIST_FOREACH(client, &map_client_list, link) {
2714 if (client->bh == bh) {
2715 cpu_unregister_map_client_do(client);
2716 break;
2717 }
2718 }
2719 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002720}
2721
2722static void cpu_notify_map_clients(void)
2723{
Fam Zheng38e047b2015-03-16 17:03:35 +08002724 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002725 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002726 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002727}
2728
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002729bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2730{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002731 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002732 hwaddr l, xlat;
2733
Paolo Bonzini41063e12015-03-18 14:21:43 +01002734 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002735 while (len > 0) {
2736 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002737 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2738 if (!memory_access_is_direct(mr, is_write)) {
2739 l = memory_access_size(mr, l, addr);
2740 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002741 return false;
2742 }
2743 }
2744
2745 len -= l;
2746 addr += l;
2747 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002748 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002749 return true;
2750}
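/*
 * Example sketch (illustrative only; the window is hypothetical): probing
 * whether a guest-supplied DMA window can be written before touching it.
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     0x2000, 512, true)) {
 *         // report a DMA error to the guest instead of issuing the access
 *     }
 */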
2751
aliguori6d16c2f2009-01-22 16:59:11 +00002752/* Map a physical memory region into the host's virtual address space.
2753 * May map a subset of the requested range, given by and returned in *plen.
2754 * May return NULL if resources needed to perform the mapping are exhausted.
2755 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002756 * Use cpu_register_map_client() to know when retrying the map operation is
2757 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002758 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002759void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002760 hwaddr addr,
2761 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002762 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002763{
Avi Kivitya8170e52012-10-23 12:30:10 +02002764 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002765 hwaddr done = 0;
2766 hwaddr l, xlat, base;
2767 MemoryRegion *mr, *this_mr;
2768 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002769
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002770 if (len == 0) {
2771 return NULL;
2772 }
aliguori6d16c2f2009-01-22 16:59:11 +00002773
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002774 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002775 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002776 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002777
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002778 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002779 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002780 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002781 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002782 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002783 /* Avoid unbounded allocations */
2784 l = MIN(l, TARGET_PAGE_SIZE);
2785 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002786 bounce.addr = addr;
2787 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002788
2789 memory_region_ref(mr);
2790 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002791 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002792 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2793 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002794 }
aliguori6d16c2f2009-01-22 16:59:11 +00002795
Paolo Bonzini41063e12015-03-18 14:21:43 +01002796 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002797 *plen = l;
2798 return bounce.buffer;
2799 }
2800
2801 base = xlat;
2802 raddr = memory_region_get_ram_addr(mr);
2803
2804 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002805 len -= l;
2806 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002807 done += l;
2808 if (len == 0) {
2809 break;
2810 }
2811
2812 l = len;
2813 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2814 if (this_mr != mr || xlat != base + done) {
2815 break;
2816 }
aliguori6d16c2f2009-01-22 16:59:11 +00002817 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002818
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002819 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002820 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002821 *plen = done;
2822 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002823}
2824
Avi Kivityac1970f2012-10-03 16:22:53 +02002825/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002826 * Will also mark the memory as dirty if is_write == 1. access_len gives
2827 * the amount of memory that was actually read or written by the caller.
2828 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002829void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2830 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002831{
2832 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002833 MemoryRegion *mr;
2834 ram_addr_t addr1;
2835
2836 mr = qemu_ram_addr_from_host(buffer, &addr1);
2837 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002838 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002839 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002840 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002841 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002842 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002843 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002844 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002845 return;
2846 }
2847 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002848 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2849 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002850 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002851 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002852 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002853 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002854 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002855 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002856}
bellardd0ecd2a2006-04-23 17:14:48 +00002857
Avi Kivitya8170e52012-10-23 12:30:10 +02002858void *cpu_physical_memory_map(hwaddr addr,
2859 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002860 int is_write)
2861{
2862 return address_space_map(&address_space_memory, addr, plen, is_write);
2863}
2864
Avi Kivitya8170e52012-10-23 12:30:10 +02002865void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2866 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002867{
2868 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2869}
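/*
 * Example sketch (illustrative only; address and length are hypothetical):
 * the zero-copy pattern built on address_space_map()/address_space_unmap().
 * On success *plen may come back smaller than requested, so real callers
 * loop; the mapping can also fail while the bounce buffer is in use, in
 * which case cpu_register_map_client() tells the caller when to retry.
 *
 *     hwaddr len = 4096;
 *     void *p = address_space_map(&address_space_memory, 0x3000, &len, true);
 *     if (p) {
 *         memset(p, 0, len);                     // "device" writes len bytes
 *         address_space_unmap(&address_space_memory, p, len, true, len);
 *     }
 */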
2870
bellard8df1cd02005-01-28 22:37:22 +00002871/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002872static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2873 MemTxAttrs attrs,
2874 MemTxResult *result,
2875 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002876{
bellard8df1cd02005-01-28 22:37:22 +00002877 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002878 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002879 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002880 hwaddr l = 4;
2881 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002882 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002883 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002884
Paolo Bonzini41063e12015-03-18 14:21:43 +01002885 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002886 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002887 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002888 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002889
bellard8df1cd02005-01-28 22:37:22 +00002890 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002891 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002892#if defined(TARGET_WORDS_BIGENDIAN)
2893 if (endian == DEVICE_LITTLE_ENDIAN) {
2894 val = bswap32(val);
2895 }
2896#else
2897 if (endian == DEVICE_BIG_ENDIAN) {
2898 val = bswap32(val);
2899 }
2900#endif
bellard8df1cd02005-01-28 22:37:22 +00002901 } else {
2902 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002903 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002904 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002905 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002906 switch (endian) {
2907 case DEVICE_LITTLE_ENDIAN:
2908 val = ldl_le_p(ptr);
2909 break;
2910 case DEVICE_BIG_ENDIAN:
2911 val = ldl_be_p(ptr);
2912 break;
2913 default:
2914 val = ldl_p(ptr);
2915 break;
2916 }
Peter Maydell50013112015-04-26 16:49:24 +01002917 r = MEMTX_OK;
2918 }
2919 if (result) {
2920 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002921 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002922 if (release_lock) {
2923 qemu_mutex_unlock_iothread();
2924 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002925 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002926 return val;
2927}
2928
Peter Maydell50013112015-04-26 16:49:24 +01002929uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2930 MemTxAttrs attrs, MemTxResult *result)
2931{
2932 return address_space_ldl_internal(as, addr, attrs, result,
2933 DEVICE_NATIVE_ENDIAN);
2934}
2935
2936uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2937 MemTxAttrs attrs, MemTxResult *result)
2938{
2939 return address_space_ldl_internal(as, addr, attrs, result,
2940 DEVICE_LITTLE_ENDIAN);
2941}
2942
2943uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2944 MemTxAttrs attrs, MemTxResult *result)
2945{
2946 return address_space_ldl_internal(as, addr, attrs, result,
2947 DEVICE_BIG_ENDIAN);
2948}
2949
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002950uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002951{
Peter Maydell50013112015-04-26 16:49:24 +01002952 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002953}
2954
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002955uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002956{
Peter Maydell50013112015-04-26 16:49:24 +01002957 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002958}
2959
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002960uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002961{
Peter Maydell50013112015-04-26 16:49:24 +01002962 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002963}
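/*
 * Example sketch (illustrative only; the address is hypothetical): peeking
 * at a 32-bit little-endian value in guest physical memory.
 *
 *     uint32_t v = ldl_le_phys(&address_space_memory, 0x4000);
 *
 * The MemTxResult-aware variant also reports whether the access reached
 * RAM or a device successfully:
 *
 *     MemTxResult r;
 *     v = address_space_ldl_le(&address_space_memory, 0x4000,
 *                              MEMTXATTRS_UNSPECIFIED, &r);
 */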
2964
bellard84b7b8e2005-11-28 21:19:04 +00002965/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002966static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2967 MemTxAttrs attrs,
2968 MemTxResult *result,
2969 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002970{
bellard84b7b8e2005-11-28 21:19:04 +00002971 uint8_t *ptr;
2972 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002973 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002974 hwaddr l = 8;
2975 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002976 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002977 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002978
Paolo Bonzini41063e12015-03-18 14:21:43 +01002979 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002980 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002981 false);
2982 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002983 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002984
bellard84b7b8e2005-11-28 21:19:04 +00002985 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002986 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002987#if defined(TARGET_WORDS_BIGENDIAN)
2988 if (endian == DEVICE_LITTLE_ENDIAN) {
2989 val = bswap64(val);
2990 }
2991#else
2992 if (endian == DEVICE_BIG_ENDIAN) {
2993 val = bswap64(val);
2994 }
2995#endif
bellard84b7b8e2005-11-28 21:19:04 +00002996 } else {
2997 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002998 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002999 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003000 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003001 switch (endian) {
3002 case DEVICE_LITTLE_ENDIAN:
3003 val = ldq_le_p(ptr);
3004 break;
3005 case DEVICE_BIG_ENDIAN:
3006 val = ldq_be_p(ptr);
3007 break;
3008 default:
3009 val = ldq_p(ptr);
3010 break;
3011 }
Peter Maydell50013112015-04-26 16:49:24 +01003012 r = MEMTX_OK;
3013 }
3014 if (result) {
3015 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003016 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003017 if (release_lock) {
3018 qemu_mutex_unlock_iothread();
3019 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003020 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003021 return val;
3022}
3023
Peter Maydell50013112015-04-26 16:49:24 +01003024uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3025 MemTxAttrs attrs, MemTxResult *result)
3026{
3027 return address_space_ldq_internal(as, addr, attrs, result,
3028 DEVICE_NATIVE_ENDIAN);
3029}
3030
3031uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3032 MemTxAttrs attrs, MemTxResult *result)
3033{
3034 return address_space_ldq_internal(as, addr, attrs, result,
3035 DEVICE_LITTLE_ENDIAN);
3036}
3037
3038uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3039 MemTxAttrs attrs, MemTxResult *result)
3040{
3041 return address_space_ldq_internal(as, addr, attrs, result,
3042 DEVICE_BIG_ENDIAN);
3043}
3044
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003045uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003046{
Peter Maydell50013112015-04-26 16:49:24 +01003047 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003048}
3049
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003050uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003051{
Peter Maydell50013112015-04-26 16:49:24 +01003052 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003053}
3054
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003055uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003056{
Peter Maydell50013112015-04-26 16:49:24 +01003057 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003058}
3059
bellardaab33092005-10-30 20:48:42 +00003060/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003061uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3062 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003063{
3064 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003065 MemTxResult r;
3066
3067 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3068 if (result) {
3069 *result = r;
3070 }
bellardaab33092005-10-30 20:48:42 +00003071 return val;
3072}
3073
Peter Maydell50013112015-04-26 16:49:24 +01003074uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3075{
3076 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3077}
3078
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003079/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003080static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3081 hwaddr addr,
3082 MemTxAttrs attrs,
3083 MemTxResult *result,
3084 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003085{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003086 uint8_t *ptr;
3087 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003088 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003089 hwaddr l = 2;
3090 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003091 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003092 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003093
Paolo Bonzini41063e12015-03-18 14:21:43 +01003094 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003095 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003096 false);
3097 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003098 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003099
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003100 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003101 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003102#if defined(TARGET_WORDS_BIGENDIAN)
3103 if (endian == DEVICE_LITTLE_ENDIAN) {
3104 val = bswap16(val);
3105 }
3106#else
3107 if (endian == DEVICE_BIG_ENDIAN) {
3108 val = bswap16(val);
3109 }
3110#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003111 } else {
3112 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003113 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003114 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003115 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003116 switch (endian) {
3117 case DEVICE_LITTLE_ENDIAN:
3118 val = lduw_le_p(ptr);
3119 break;
3120 case DEVICE_BIG_ENDIAN:
3121 val = lduw_be_p(ptr);
3122 break;
3123 default:
3124 val = lduw_p(ptr);
3125 break;
3126 }
Peter Maydell50013112015-04-26 16:49:24 +01003127 r = MEMTX_OK;
3128 }
3129 if (result) {
3130 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003131 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003132 if (release_lock) {
3133 qemu_mutex_unlock_iothread();
3134 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003135 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003136 return val;
bellardaab33092005-10-30 20:48:42 +00003137}
3138
Peter Maydell50013112015-04-26 16:49:24 +01003139uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3140 MemTxAttrs attrs, MemTxResult *result)
3141{
3142 return address_space_lduw_internal(as, addr, attrs, result,
3143 DEVICE_NATIVE_ENDIAN);
3144}
3145
3146uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3147 MemTxAttrs attrs, MemTxResult *result)
3148{
3149 return address_space_lduw_internal(as, addr, attrs, result,
3150 DEVICE_LITTLE_ENDIAN);
3151}
3152
3153uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3154 MemTxAttrs attrs, MemTxResult *result)
3155{
3156 return address_space_lduw_internal(as, addr, attrs, result,
3157 DEVICE_BIG_ENDIAN);
3158}
3159
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003160uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003161{
Peter Maydell50013112015-04-26 16:49:24 +01003162 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003163}
3164
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003165uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003166{
Peter Maydell50013112015-04-26 16:49:24 +01003167 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003168}
3169
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003170uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003171{
Peter Maydell50013112015-04-26 16:49:24 +01003172 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003173}
3174
bellard8df1cd02005-01-28 22:37:22 +00003175/* warning: addr must be aligned. The RAM page is not marked as dirty
 3176 and the code inside is not invalidated. This is useful if the dirty
 3177 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003178void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3179 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003180{
bellard8df1cd02005-01-28 22:37:22 +00003181 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003182 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003183 hwaddr l = 4;
3184 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003185 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003186 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003187 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003188
Paolo Bonzini41063e12015-03-18 14:21:43 +01003189 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003190 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003191 true);
3192 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003193 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003194
Peter Maydell50013112015-04-26 16:49:24 +01003195 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003196 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003197 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003198 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003199 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003200
Paolo Bonzini845b6212015-03-23 11:45:53 +01003201 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3202 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003203 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003204 r = MEMTX_OK;
3205 }
3206 if (result) {
3207 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003208 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003209 if (release_lock) {
3210 qemu_mutex_unlock_iothread();
3211 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003212 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003213}
3214
Peter Maydell50013112015-04-26 16:49:24 +01003215void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3216{
3217 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3218}
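/*
 * Example sketch (illustrative only; pte_addr and the flag are placeholders
 * borrowed from the x86 MMU helpers): stl_phys_notdirty() suits callers
 * that track dirtiness themselves, e.g. when updating accessed/dirty bits
 * inside a guest page table entry.
 *
 *     uint32_t pte = ldl_phys(cs->as, pte_addr);
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 */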
3219
bellard8df1cd02005-01-28 22:37:22 +00003220/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003221static inline void address_space_stl_internal(AddressSpace *as,
3222 hwaddr addr, uint32_t val,
3223 MemTxAttrs attrs,
3224 MemTxResult *result,
3225 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003226{
bellard8df1cd02005-01-28 22:37:22 +00003227 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003228 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003229 hwaddr l = 4;
3230 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003231 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003232 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003233
Paolo Bonzini41063e12015-03-18 14:21:43 +01003234 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003235 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003236 true);
3237 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003238 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003239
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003240#if defined(TARGET_WORDS_BIGENDIAN)
3241 if (endian == DEVICE_LITTLE_ENDIAN) {
3242 val = bswap32(val);
3243 }
3244#else
3245 if (endian == DEVICE_BIG_ENDIAN) {
3246 val = bswap32(val);
3247 }
3248#endif
Peter Maydell50013112015-04-26 16:49:24 +01003249 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003250 } else {
bellard8df1cd02005-01-28 22:37:22 +00003251 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003252 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003253 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003254 switch (endian) {
3255 case DEVICE_LITTLE_ENDIAN:
3256 stl_le_p(ptr, val);
3257 break;
3258 case DEVICE_BIG_ENDIAN:
3259 stl_be_p(ptr, val);
3260 break;
3261 default:
3262 stl_p(ptr, val);
3263 break;
3264 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003265 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003266 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003267 }
Peter Maydell50013112015-04-26 16:49:24 +01003268 if (result) {
3269 *result = r;
3270 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003271 if (release_lock) {
3272 qemu_mutex_unlock_iothread();
3273 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003274 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003275}
3276
3277void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3278 MemTxAttrs attrs, MemTxResult *result)
3279{
3280 address_space_stl_internal(as, addr, val, attrs, result,
3281 DEVICE_NATIVE_ENDIAN);
3282}
3283
3284void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3285 MemTxAttrs attrs, MemTxResult *result)
3286{
3287 address_space_stl_internal(as, addr, val, attrs, result,
3288 DEVICE_LITTLE_ENDIAN);
3289}
3290
3291void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3292 MemTxAttrs attrs, MemTxResult *result)
3293{
3294 address_space_stl_internal(as, addr, val, attrs, result,
3295 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003296}
3297
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003298void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003299{
Peter Maydell50013112015-04-26 16:49:24 +01003300 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003301}
3302
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003303void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003304{
Peter Maydell50013112015-04-26 16:49:24 +01003305 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003306}
3307
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003308void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003309{
Peter Maydell50013112015-04-26 16:49:24 +01003310 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003311}
3312
bellardaab33092005-10-30 20:48:42 +00003313/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003314void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3315 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003316{
3317 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003318 MemTxResult r;
3319
3320 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3321 if (result) {
3322 *result = r;
3323 }
3324}
3325
3326void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3327{
3328 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003329}
3330
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003331/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003332static inline void address_space_stw_internal(AddressSpace *as,
3333 hwaddr addr, uint32_t val,
3334 MemTxAttrs attrs,
3335 MemTxResult *result,
3336 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003337{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003338 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003339 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003340 hwaddr l = 2;
3341 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003342 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003343 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003344
Paolo Bonzini41063e12015-03-18 14:21:43 +01003345 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003346 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003347 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003348 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003349
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003350#if defined(TARGET_WORDS_BIGENDIAN)
3351 if (endian == DEVICE_LITTLE_ENDIAN) {
3352 val = bswap16(val);
3353 }
3354#else
3355 if (endian == DEVICE_BIG_ENDIAN) {
3356 val = bswap16(val);
3357 }
3358#endif
Peter Maydell50013112015-04-26 16:49:24 +01003359 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003360 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003361 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003362 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003363 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003364 switch (endian) {
3365 case DEVICE_LITTLE_ENDIAN:
3366 stw_le_p(ptr, val);
3367 break;
3368 case DEVICE_BIG_ENDIAN:
3369 stw_be_p(ptr, val);
3370 break;
3371 default:
3372 stw_p(ptr, val);
3373 break;
3374 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003375 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003376 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003377 }
Peter Maydell50013112015-04-26 16:49:24 +01003378 if (result) {
3379 *result = r;
3380 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003381 if (release_lock) {
3382 qemu_mutex_unlock_iothread();
3383 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003384 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003385}
3386
3387void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3388 MemTxAttrs attrs, MemTxResult *result)
3389{
3390 address_space_stw_internal(as, addr, val, attrs, result,
3391 DEVICE_NATIVE_ENDIAN);
3392}
3393
3394void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3395 MemTxAttrs attrs, MemTxResult *result)
3396{
3397 address_space_stw_internal(as, addr, val, attrs, result,
3398 DEVICE_LITTLE_ENDIAN);
3399}
3400
3401void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3402 MemTxAttrs attrs, MemTxResult *result)
3403{
3404 address_space_stw_internal(as, addr, val, attrs, result,
3405 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003406}
3407
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003408void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003409{
Peter Maydell50013112015-04-26 16:49:24 +01003410 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003411}
3412
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003413void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003414{
Peter Maydell50013112015-04-26 16:49:24 +01003415 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003416}
3417
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003418void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003419{
Peter Maydell50013112015-04-26 16:49:24 +01003420 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003421}
3422
bellardaab33092005-10-30 20:48:42 +00003423/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003424void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3425 MemTxAttrs attrs, MemTxResult *result)
3426{
3427 MemTxResult r;
3428 val = tswap64(val);
3429 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3430 if (result) {
3431 *result = r;
3432 }
3433}
3434
3435void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3436 MemTxAttrs attrs, MemTxResult *result)
3437{
3438 MemTxResult r;
3439 val = cpu_to_le64(val);
3440 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3441 if (result) {
3442 *result = r;
3443 }
3444}
3445void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3446 MemTxAttrs attrs, MemTxResult *result)
3447{
3448 MemTxResult r;
3449 val = cpu_to_be64(val);
3450 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3451 if (result) {
3452 *result = r;
3453 }
3454}
3455
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003456void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003457{
Peter Maydell50013112015-04-26 16:49:24 +01003458 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003459}
3460
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003461void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003462{
Peter Maydell50013112015-04-26 16:49:24 +01003463 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003464}
3465
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003466void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003467{
Peter Maydell50013112015-04-26 16:49:24 +01003468 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003469}
3470
aliguori5e2972f2009-03-28 17:51:36 +00003471/* virtual memory access for debug (includes writing to ROM) */
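/*
 * The access is split at guest page boundaries: each page is translated
 * with cpu_get_phys_page_debug() and then accessed through the CPU's
 * address space.  Writes go through cpu_physical_memory_write_rom() so
 * that a debugger (e.g. the gdb stub) can also patch ROM-backed memory.
 */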
Andreas Färberf17ec442013-06-29 19:40:58 +02003472int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003473 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003474{
3475 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003476 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003477 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003478
3479 while (len > 0) {
3480 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003481 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003482 /* if no physical page mapped, return an error */
3483 if (phys_addr == -1)
3484 return -1;
3485 l = (page + TARGET_PAGE_SIZE) - addr;
3486 if (l > len)
3487 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003488 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003489 if (is_write) {
3490 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3491 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003492 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3493 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003494 }
bellard13eb76e2004-01-24 15:23:36 +00003495 len -= l;
3496 buf += l;
3497 addr += l;
3498 }
3499 return 0;
3500}
Paul Brooka68fe892010-03-01 00:08:59 +00003501#endif
bellard13eb76e2004-01-24 15:23:36 +00003502
Blue Swirl8e4a4242013-01-06 18:30:17 +00003503/*
3504 * A helper function for the _utterly broken_ virtio device model to find out if
3505 * it's running on a big-endian machine. Don't do this at home, kids!
3506 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003507bool target_words_bigendian(void);
3508bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003509{
3510#if defined(TARGET_WORDS_BIGENDIAN)
3511 return true;
3512#else
3513 return false;
3514#endif
3515}
3516
Wen Congyang76f35532012-05-07 12:04:18 +08003517#ifndef CONFIG_USER_ONLY
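/*
 * Return true if the given physical address resolves to MMIO, i.e. to a
 * region that is neither RAM nor a ROM device.  The lookup is done under
 * the RCU read lock so the memory map cannot change while it is inspected.
 */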
Avi Kivitya8170e52012-10-23 12:30:10 +02003518bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003519{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003520 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003521 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003522 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003523
Paolo Bonzini41063e12015-03-18 14:21:43 +01003524 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003525 mr = address_space_translate(&address_space_memory,
3526 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003527
Paolo Bonzini41063e12015-03-18 14:21:43 +01003528 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3529 rcu_read_unlock();
3530 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003531}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003532
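/*
 * Walk the list of RAM blocks under the RCU read lock, invoking func for
 * each block.  Iteration stops at the first callback that returns a
 * non-zero value; that value is propagated to the caller, otherwise 0 is
 * returned.
 */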
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003533int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003534{
3535 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003536 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003537
Mike Day0dc3f442013-09-05 14:41:35 -04003538 rcu_read_lock();
3539 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003540 ret = func(block->idstr, block->host, block->offset,
3541 block->used_length, opaque);
3542 if (ret) {
3543 break;
3544 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003545 }
Mike Day0dc3f442013-09-05 14:41:35 -04003546 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003547 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003548}
Peter Maydellec3f8c92013-06-27 20:53:38 +01003549#endif