/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

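/* The physical address space is described by a sparse multi-level radix
 * tree rooted at AddressSpaceDispatch::phys_map.  Interior nodes live in
 * PhysPageMap::nodes; leaves index into PhysPageMap::sections.  The
 * 'skip' field lets a lookup jump over levels that have only one child
 * (see phys_page_compact() below).
 */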
struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

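/* Worked example: with ADDR_SPACE_BITS = 64, P_L2_BITS = 9 and a typical
 * 4 KiB target page (TARGET_PAGE_BITS = 12), this gives
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels of 512-entry tables.
 */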
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

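/* Make sure map->nodes has room for @nodes more nodes, growing the
 * array geometrically (doubling, with a floor of 16 entries) so that
 * repeated reservations stay cheap.
 */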
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

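/* Recursive helper for phys_page_set(): map the page range starting at
 * *index and spanning *nb pages to section number @leaf, allocating
 * intermediate nodes on demand.  A sub-tree that is aligned and fully
 * covered collapses into a single leaf entry at the current level.
 */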
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  Simply detect that the entry has a
 * single child, and update our entry so we can skip it and go directly
 * to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

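/* Walk the radix tree from the root, consuming lp.skip levels at each
 * step, and return the MemoryRegionSection covering @addr.  Addresses
 * that fall outside any registered section resolve to the unassigned
 * section.
 */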
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

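/* Translate @addr inside dispatch @d to a section plus an offset into
 * its MemoryRegion (*xlat).  *plen is clamped to the section size for
 * RAM only; see the comment about MMIO inside the function.
 */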
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

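/* Resolve @addr in @as down to a leaf MemoryRegion, walking IOMMUs on
 * the way.  A minimal usage sketch (hypothetical caller, not code from
 * this file), reading up to 4 bytes of system memory under the required
 * RCU critical section:
 *
 *     hwaddr xlat, len = 4;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(&address_space_memory, addr,
 *                                  &xlat, &len, false);
 *     if (memory_access_is_direct(mr, false)) {
 *         ... read len bytes at offset xlat in mr's RAM block ...
 *     }
 *     rcu_read_unlock();
 */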
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

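/* The exception_index subsection is migrated only when
 * cpu_common_exception_index_needed() reports that TCG is in use and an
 * exception is actually pending; otherwise the stream stays compatible
 * with older QEMUs.
 */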
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

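/* cpu_index allocation: system emulation hands out indexes from a
 * bitmap so that an unplugged CPU can return its index for reuse; user
 * mode simply counts the CPUs currently on the list and never frees an
 * index.
 */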
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

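/* Wire a new CPU into the emulator: pick a cpu_index, add the CPU to
 * the global list, and register its state for migration (vmstate, or
 * the legacy cpu_save/cpu_load interface when CPU_SAVE_VERSION is
 * defined).
 */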
void cpu_exec_init(CPUArchState *env, Error **errp)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
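
/* Example: a watchpoint with vaddr 0x1000 and len 4 covers bytes
 * 0x1000..0x1003, so a 2-byte access at 0x0fff matches (it touches
 * 0x1000) while a 2-byte access at 0x1004 does not.  Comparing
 * inclusive range ends keeps this correct even for a watchpoint that
 * ends at the very top of the address space, where addr + len would
 * wrap to zero.
 */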

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
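/* Find the RAMBlock that contains guest ram address @addr, trying the
 * most-recently-used block before scanning the full list.
 */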
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

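/* Atomically test and clear the dirty bitmap of @client (e.g. VGA,
 * code, or migration) for [start, start + length); returns true if any
 * page in the range was dirty.  For TCG the TLBs are reset so that the
 * next guest write re-marks the range dirty.
 */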
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

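/* Build the iotlb value for a softmmu TLB entry covering @vaddr: RAM
 * sections yield a ram_addr tagged with the NOTDIRTY or ROM section
 * number, MMIO sections yield their section index, and an overlapping
 * watchpoint redirects the access through the watch region via
 * TLB_MMIO.
 */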
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

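/* Append @section to the dispatch map's section table, taking a
 * reference on its MemoryRegion, and return the new section number.
 */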
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

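/* Register a section that does not cover whole target pages: route the
 * affected page through a subpage_t so that different fragments of the
 * same page can dispatch to different sections.
 */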
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

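/* Listener callback for region additions: split @section into
 * page-aligned and partial-page pieces, registering subpages for the
 * partial pieces and a multipage mapping for runs of whole pages, all
 * in the address space's next dispatch map.
 */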
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

Hu Taofc7a5802014-09-09 13:28:01 +08001137static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001138{
1139 struct statfs fs;
1140 int ret;
1141
1142 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001143 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001144 } while (ret != 0 && errno == EINTR);
1145
1146 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001147 error_setg_errno(errp, errno, "failed to get page size of file %s",
1148 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001149 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001150 }
1151
1152 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001153 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001154
1155 return fs.f_bsize;
1156}
1157
Alex Williamson04b16652010-07-02 11:13:17 -06001158static void *file_ram_alloc(RAMBlock *block,
1159 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001160 const char *path,
1161 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001162{
1163 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001164 char *sanitized_name;
1165 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001166 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001167 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001168 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001169 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001170
Hu Taofc7a5802014-09-09 13:28:01 +08001171 hpagesize = gethugepagesize(path, &local_err);
1172 if (local_err) {
1173 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001174 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001175 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001176 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001177
1178 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001179 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1180 "or larger than huge page size 0x%" PRIx64,
1181 memory, hpagesize);
1182 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001183 }
1184
1185 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001186 error_setg(errp,
1187 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001188 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001189 }
1190
Peter Feiner8ca761f2013-03-04 13:54:25 -05001191 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001192 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001193 for (c = sanitized_name; *c != '\0'; c++) {
1194 if (*c == '/')
1195 *c = '_';
1196 }
1197
1198 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1199 sanitized_name);
1200 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001201
1202 fd = mkstemp(filename);
1203 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001204 error_setg_errno(errp, errno,
1205 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001206 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001207 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001208 }
1209 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001210 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001211
1212 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1213
1214 /*
1215 * ftruncate is not supported by hugetlbfs in older
1216 * hosts, so don't bother bailing out on errors.
1217 * If anything goes wrong with it under other filesystems,
1218 * mmap will fail.
1219 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001220 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001221 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001222 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001223
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001224 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1225 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1226 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001227 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001228 error_setg_errno(errp, errno,
1229 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001230 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001231 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001232 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001233
1234 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001235 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001236 }
1237
Alex Williamson04b16652010-07-02 11:13:17 -06001238 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001239 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001240
1241error:
1242 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001243 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001244 exit(1);
1245 }
1246 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001247}
1248#endif
1249
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out the same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

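/* Example (illustrative only, hypothetical layout): with existing blocks at
 * [0x0, 0x8000000) and [0x10000000, 0x18000000), a request for 0x4000000
 * bytes considers the gap after each block and returns 0x8000000 -- the
 * end of the block whose following gap is the smallest one that still
 * fits, i.e. a best-fit policy that keeps the ram_addr_t space compact.
 */
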
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

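/* Minimal usage sketch (illustrative only, not compiled): allocating a
 * resizeable block and growing it later.  "example_resized" and the sizes
 * are hypothetical, not part of this file.
 */
#if 0
static void example_resized(const char *id, uint64_t length, void *host)
{
    /* Device-specific reaction to the new size, e.g. repatch tables. */
}

static void example(MemoryRegion *mr, Error **errp)
{
    /* Reserve 16MB of host mapping up front, expose 1MB initially... */
    ram_addr_t base = qemu_ram_alloc_resizeable(1 * 1024 * 1024,
                                                16 * 1024 * 1024,
                                                example_resized, mr, errp);

    /* ...then grow the used length; resizing past max_length fails
     * with -EINVAL, and shrinking/growing only touches used_length. */
    qemu_ram_resize(base, 2 * 1024 * 1024, errp);
}
#endif
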
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

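/* Illustrative note (not part of the build): after three hypothetical
 * ram_block_add() calls for blocks of 2G, 128M and 4M, the list reads
 * 2G -> 128M -> 4M.  Lookups scan the list front to back, so keeping the
 * biggest blocks -- which cover most guest addresses -- first makes the
 * common case fast.
 */
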
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}

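/* Illustrative sketch (not compiled): one way a caller can keep the pointer
 * returned by qemu_get_ram_ptr() valid, per the comment above -- stay inside
 * an RCU critical section for the whole use.  "consume_bytes" is a
 * hypothetical helper, not part of this file.
 */
#if 0
static void example(ram_addr_t addr)
{
    uint8_t *p;

    rcu_read_lock();
    p = qemu_get_ram_ptr(addr);
    consume_bytes(p);    /* safe: the RAMBlock cannot be reclaimed here */
    rcu_read_unlock();   /* after this, p must not be dereferenced */
}
#endif
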
/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    void *ptr;
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;
        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length)
                    *size = block->max_length - addr + block->offset;
                ptr = ramblock_ptr(block, addr - block->offset);
                rcu_read_unlock();
                return ptr;
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;
    MemoryRegion *mr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        mr = qemu_get_ram_block(*ram_addr)->mr;
        rcu_read_unlock();
        return mr;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    mr = block->mr;
    rcu_read_unlock();
    return mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

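/* Illustrative sketch (not compiled): how these routines get exercised.
 * cpu_watchpoint_insert() (defined elsewhere in this file) marks the page
 * so TLB fills route accesses through io_mem_watch; the address and length
 * below are hypothetical.
 */
#if 0
static void example(CPUState *cpu)
{
    /* Trap any access to the 4 bytes at 0x1000; subsequent guest
     * reads/writes land in watch_mem_read()/watch_mem_write() above. */
    cpu_watchpoint_insert(cpu, 0x1000, 4, BP_MEM_ACCESS, NULL);
}
#endif
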
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

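/* Illustrative note (not part of the build): subpages handle pages that are
 * split between sections.  E.g. if a hypothetical 4K page holds RAM in its
 * first half and device MMIO in the second, register_subpage() maps each
 * half to its own section index inside one subpage_t, and the
 * subpage_read()/subpage_write() routines above forward every access to
 * subpage->as at addr + subpage->base, so the normal dispatch picks the
 * right section at sub-page granularity.
 */
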
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

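/* Illustrative timeline (not part of the build) of the RCU handoff done in
 * mem_begin()/mem_commit() above:
 *
 *   writer:  build the next dispatch -> atomic_rcu_set(&as->dispatch, next)
 *   readers: atomic_rcu_read(&as->dispatch) inside rcu_read_lock()/unlock()
 *   writer:  call_rcu() frees the old dispatch only after every reader
 *            that could still see it has left its critical section
 *
 * so address translation never observes a half-built dispatch tree.
 */
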
static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        cpu_reload_memory_map(cpu);
    }
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

bellard13eb76e2004-01-24 15:23:36 +00002292/* physical memory access (slow version, mainly for debug) */
2293#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002294int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002295 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002296{
2297 int l, flags;
2298 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002299 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002300
2301 while (len > 0) {
2302 page = addr & TARGET_PAGE_MASK;
2303 l = (page + TARGET_PAGE_SIZE) - addr;
2304 if (l > len)
2305 l = len;
2306 flags = page_get_flags(page);
2307 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002308 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002309 if (is_write) {
2310 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002311 return -1;
bellard579a97f2007-11-11 14:26:47 +00002312 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002313 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002314 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002315 memcpy(p, buf, l);
2316 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002317 } else {
2318 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002319 return -1;
bellard579a97f2007-11-11 14:26:47 +00002320 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002321 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002322 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002323 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002324 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002325 }
2326 len -= l;
2327 buf += l;
2328 addr += l;
2329 }
Paul Brooka68fe892010-03-01 00:08:59 +00002330 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002331}
bellard8df1cd02005-01-28 22:37:22 +00002332
bellard13eb76e2004-01-24 15:23:36 +00002333#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002334
Paolo Bonzini845b6212015-03-23 11:45:53 +01002335static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002336 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002337{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002338 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2339 /* No early return if dirty_log_mask is or becomes 0, because
2340 * cpu_physical_memory_set_dirty_range will still call
2341 * xen_modified_memory.
2342 */
2343 if (dirty_log_mask) {
2344 dirty_log_mask =
2345 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002346 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002347 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2348 tb_invalidate_phys_range(addr, addr + length);
2349 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2350 }
2351 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002352}
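
/*
 * Editor's note: DIRTY_MEMORY_CODE is handled inline here rather than left
 * in the mask because a write into a page that holds translated code must
 * synchronously invalidate the affected TBs; the remaining bits (VGA,
 * migration) are plain dirty bitmaps that consumers can harvest later.
 */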

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
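
/*
 * Editor's note, worked examples: for an access at addr 0x1002 to a region
 * that does not allow unaligned accesses, addr & -addr isolates the lowest
 * set address bit (2 here), so at most 2 bytes go through in one dispatch.
 * Independently, a 3-byte residue fails the l & (l - 1) power-of-two test
 * and is rounded down to 2; the caller's loop picks up whatever remains.
 */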

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
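
/*
 * Editor's note: callers follow the pattern
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ... dispatch the MMIO read or write ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 *
 * so the big QEMU lock is taken only for devices that still require it
 * (mr->global_locking) and is dropped again as soon as the access that
 * needed it has completed.
 */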

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}
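
/*
 * Editor's sketch (not part of the original file): a device model doing a
 * bounded DMA-style read could use the API above roughly like this; the
 * buffer size and guest address are made up for illustration.
 *
 *     uint8_t buf[64];
 *     MemTxResult r = address_space_rw(&address_space_memory, 0x40000000,
 *                                      MEMTXATTRS_UNSPECIFIED, buf,
 *                                      sizeof(buf), false);
 *     if (r != MEMTX_OK) {
 *         // at least one subaccess faulted; buf may be partially filled
 *     }
 */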

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;
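
/*
 * Editor's note: there is deliberately only one global bounce buffer.  If
 * address_space_map() hits an indirect (MMIO) region while the buffer is
 * already in use, it fails, and the caller is expected to register a map
 * client below to be notified when the buffer frees up.
 */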

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
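
/*
 * Editor's sketch (hypothetical helper, not in the original file): the
 * intended retry pattern is to park a bottom half here and try the map
 * again when it runs, e.g.:
 *
 *     static void my_dma_retry_bh(void *opaque)
 *     {
 *         MyDMAState *s = opaque;   // hypothetical device state
 *         my_dma_run(s);            // calls address_space_map() again
 *     }
 *
 *     ...
 *     if (!address_space_map(as, addr, &plen, is_write)) {
 *         cpu_register_map_client(qemu_bh_new(my_dma_retry_bh, s));
 *     }
 */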

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* Don't leak the RCU read lock on the failure path.  */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
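
/*
 * Editor's sketch (not part of the original file): the canonical
 * map/modify/unmap sequence, e.g. for a zero-copy transfer into guest RAM.
 * fill_from_device() and the addresses are hypothetical.
 *
 *     hwaddr plen = size;
 *     void *host = address_space_map(as, gpa, &plen, true);
 *     if (host) {
 *         fill_from_device(host, plen);          // plen may be < size
 *         address_space_unmap(as, host, plen, true, plen);
 *     }
 */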

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
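
/*
 * Editor's note, hedged example: the _le/_be variants pin down the
 * device's byte order independently of host and target endianness; the
 * bswap above reconciles the two when they differ.  A model of a
 * little-endian controller might read a 32-bit register as:
 *
 *     MemTxResult r;
 *     uint32_t v = address_space_ldl_le(&address_space_memory,
 *                                       bar_base + 0x10,
 *                                       MEMTXATTRS_UNSPECIFIED, &r);
 *
 * where bar_base is a made-up guest physical base address.
 */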

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
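
/*
 * Editor's note: stores report device-signalled errors only through the
 * optional MemTxResult out-parameter; passing NULL, as the *_phys wrappers
 * above do, silently discards them.  A hedged example of a status-checked
 * store to a hypothetical register block:
 *
 *     MemTxResult r;
 *     address_space_stl_le(&address_space_memory, reg_base + 0x4,
 *                          0xdeadbeef, MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         // the device signalled a bus error for this write
 *     }
 */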

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
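
/*
 * Editor's note: this is the path the gdbstub takes.  Writes deliberately
 * go through cpu_physical_memory_write_rom() so that a debugger can patch
 * ROM-backed pages, for example to plant software breakpoints, where a
 * normal guest store would be discarded.
 */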
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
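
/*
 * Editor's sketch (hypothetical callback, not in the original file): a
 * visitor matching the call above; return non-zero to stop the walk early.
 *
 *     static int print_block(const char *idstr, void *host_addr,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         fprintf(opaque, "%s: host %p, len " RAM_ADDR_FMT "\n",
 *                 idstr, host_addr, length);
 *         return 0;
 *     }
 *
 *     ...
 *     qemu_ram_foreach_block(print_block, stderr);
 */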
#endif