/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to get to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

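/* Sentinel value for PhysPageEntry.ptr: the all-ones value of the 26-bit
 * field (0x3ffffff), which phys_map_node_alloc() asserts a real node index
 * never reaches. */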
#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
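/* Worked example (illustrative numbers): with ADDR_SPACE_BITS == 64 and a
 * 4 KiB target page (TARGET_PAGE_BITS == 12), this evaluates to
 * ((64 - 12 - 1) / 9) + 1 == 6 levels of 9 bits each, enough to cover the
 * 52 bits of page frame number. */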

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

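/* Fill [*index, *index + *nb) of the subtree rooted at *lp with "leaf",
 * allocating intermediate nodes on demand.  A range that covers a whole,
 * aligned entry at this level is stored directly (skip = 0); partial
 * entries recurse one level down.  The caller (phys_page_set()) has
 * already reserved enough nodes. */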
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

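/* Walk the multi-level map looking up "addr".  Each entry consumes lp.skip
 * levels at once (see phys_page_compact()); a NIL pointer on the way down
 * means the page was never mapped and resolves to the unassigned section. */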
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

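/* A "direct" access is one that the memory API can satisfy with plain host
 * loads and stores on the RAM backing the region, bypassing MMIO dispatch.
 * Writes to ROM and to ROM devices must still take the dispatch path. */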
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

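/* Allocate the lowest free cpu_index from a global bitmap, so that indices
 * released by cpu_exec_exit() (e.g. on CPU hot-unplug) can be reused. */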
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
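
/* Worked example (illustrative numbers): a watchpoint at vaddr 0x1000 with
 * len 4 has wpend 0x1003; an access at addr 0x1002 with len 8 has addrend
 * 0x1009.  Neither addr > wpend nor wp->vaddr > addrend holds, so the
 * ranges overlap and the function returns true. */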

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

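/* Re-arm dirty tracking in the TCG TLBs for [start, start + length), so
 * that the next guest write to those pages takes the slow path and marks
 * them dirty again.  The whole range must lie within a single RAMBlock
 * (asserted below). */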
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
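
/* Illustrative use (hypothetical accelerator code, not part of this file):
 *
 *     static void *my_accel_ram_alloc(size_t size, uint64_t *align)
 *     {
 *         ... allocate "size" bytes under the backend's constraints,
 *         reporting the required alignment through *align ...
 *     }
 *
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 *
 * The hook should be installed before the first RAM block is allocated,
 * since phys_mem_alloc is consulted at allocation time. */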

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

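/* Register a section that does not cover whole target pages: the affected
 * page is routed through a subpage_t, whose sub_section[] table then
 * dispatches each byte range within the page to the right section. */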
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

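/* Split an incoming section into a page-aligned middle handled by
 * register_multipage() and unaligned head/tail fragments that need
 * subpages.  Worked example (illustrative numbers, 4 KiB pages): a section
 * at offset 0x800 of size 0x2000 becomes a subpage piece [0x800, 0xfff],
 * a multipage piece [0x1000, 0x1fff] and a subpage piece [0x2000, 0x27ff]. */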
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

Hu Taofc7a5802014-09-09 13:28:01 +08001135static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001136{
1137 struct statfs fs;
1138 int ret;
1139
1140 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001141 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001142 } while (ret != 0 && errno == EINTR);
1143
1144 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001145 error_setg_errno(errp, errno, "failed to get page size of file %s",
1146 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001147 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001148 }
1149
1150 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001151 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001152
1153 return fs.f_bsize;
1154}
1155
Alex Williamson04b16652010-07-02 11:13:17 -06001156static void *file_ram_alloc(RAMBlock *block,
1157 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001158 const char *path,
1159 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001160{
1161 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001162 char *sanitized_name;
1163 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001164 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001165 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001166 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001167 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001168
Hu Taofc7a5802014-09-09 13:28:01 +08001169 hpagesize = gethugepagesize(path, &local_err);
1170 if (local_err) {
1171 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001172 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001173 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001174 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001175
1176 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001177 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1178 "or larger than huge page size 0x%" PRIx64,
1179 memory, hpagesize);
1180 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001181 }
1182
1183 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001184 error_setg(errp,
1185 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001186 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001187 }
1188
Peter Feiner8ca761f2013-03-04 13:54:25 -05001189 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001190 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001191 for (c = sanitized_name; *c != '\0'; c++) {
1192 if (*c == '/')
1193 *c = '_';
1194 }
1195
1196 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1197 sanitized_name);
1198 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199
1200 fd = mkstemp(filename);
1201 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001202 error_setg_errno(errp, errno,
1203 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001204 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001205 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001206 }
1207 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001208 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001209
1210 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1211
1212 /*
1213 * ftruncate is not supported by hugetlbfs on older
1214 * hosts, so don't bother bailing out on errors.
1215 * If anything goes wrong with it under other filesystems,
1216 * mmap will fail.
1217 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001218 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001219 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001220 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001221
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001222 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1223 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1224 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001225 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001226 error_setg_errno(errp, errno,
1227 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001228 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001229 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001230 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001231
1232 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001233 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001234 }
1235
Alex Williamson04b16652010-07-02 11:13:17 -06001236 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001237 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001238
1239error:
1240 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001241 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001242 exit(1);
1243 }
1244 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001245}
1246#endif
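/* A note on the rounding in file_ram_alloc() above: the length passed
 * to ftruncate()/mmap() is rounded up to a whole number of huge pages.
 * With a (hypothetical) 2 MiB huge page size, hpagesize = 0x200000,
 * a request of 0x6400001 bytes becomes
 *     (0x6400001 + 0x1fffff) & ~0x1fffff == 0x6600000
 * so only the backing mapping is padded; the block length set by the
 * caller is unchanged.
 */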
1247
Mike Day0dc3f442013-09-05 14:41:35 -04001248/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001249static ram_addr_t find_ram_offset(ram_addr_t size)
1250{
Alex Williamson04b16652010-07-02 11:13:17 -06001251 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001252 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001253
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001254 assert(size != 0); /* it would hand out same offset multiple times */
1255
Mike Day0dc3f442013-09-05 14:41:35 -04001256 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001257 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001258 }
Alex Williamson04b16652010-07-02 11:13:17 -06001259
Mike Day0dc3f442013-09-05 14:41:35 -04001260 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001261 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001262
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001263 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001264
Mike Day0dc3f442013-09-05 14:41:35 -04001265 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001266 if (next_block->offset >= end) {
1267 next = MIN(next, next_block->offset);
1268 }
1269 }
1270 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001271 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001272 mingap = next - end;
1273 }
1274 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001275
1276 if (offset == RAM_ADDR_MAX) {
1277 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1278 (uint64_t)size);
1279 abort();
1280 }
1281
Alex Williamson04b16652010-07-02 11:13:17 -06001282 return offset;
1283}
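/* Illustrative walk through find_ram_offset() (block layout invented):
 * with blocks at [0x0, 0x40000000) and [0x80000000, 0xc0000000), a
 * request for 0x20000000 bytes sees two gaps: 0x40000000 bytes after
 * the first block and an unbounded one after the second. Both fit,
 * but the first is the smaller gap that fits, so 0x40000000 is
 * returned; preferring the tightest gap keeps the ram_addr_t space
 * compact.
 */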
1284
Juan Quintela652d7ec2012-07-20 10:37:54 +02001285ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001286{
Alex Williamsond17b5282010-06-25 11:08:38 -06001287 RAMBlock *block;
1288 ram_addr_t last = 0;
1289
Mike Day0dc3f442013-09-05 14:41:35 -04001290 rcu_read_lock();
1291 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001292 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001293 }
Mike Day0dc3f442013-09-05 14:41:35 -04001294 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001295 return last;
1296}
1297
Jason Baronddb97f12012-08-02 15:44:16 -04001298static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1299{
1300 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001301
1302 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001303 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001304 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1305 if (ret) {
1306 perror("qemu_madvise");
1307 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1308 "but dump_guest_core=off specified\n");
1309 }
1310 }
1311}
1312
Mike Day0dc3f442013-09-05 14:41:35 -04001313/* Called within an RCU critical section, or while the ramlist lock
1314 * is held.
1315 */
Hu Tao20cfe882014-04-02 15:13:26 +08001316static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001317{
Hu Tao20cfe882014-04-02 15:13:26 +08001318 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001319
Mike Day0dc3f442013-09-05 14:41:35 -04001320 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001321 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001322 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001323 }
1324 }
Hu Tao20cfe882014-04-02 15:13:26 +08001325
1326 return NULL;
1327}
1328
Mike Dayae3a7042013-09-05 14:41:35 -04001329/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001330void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1331{
Mike Dayae3a7042013-09-05 14:41:35 -04001332 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001333
Mike Day0dc3f442013-09-05 14:41:35 -04001334 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001335 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001336 assert(new_block);
1337 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001338
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001339 if (dev) {
1340 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001341 if (id) {
1342 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001343 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001344 }
1345 }
1346 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1347
Mike Day0dc3f442013-09-05 14:41:35 -04001348 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001349 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001350 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1351 new_block->idstr);
1352 abort();
1353 }
1354 }
Mike Day0dc3f442013-09-05 14:41:35 -04001355 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001356}
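/* The resulting idstr is "<qdev path>/<name>" when a device was given
 * and plain "<name>" otherwise, e.g. (hypothetically) "pc.ram" for
 * main memory. Migration matches RAM blocks between source and
 * destination by this string, hence the abort on duplicates above.
 */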
1357
Mike Dayae3a7042013-09-05 14:41:35 -04001358/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001359void qemu_ram_unset_idstr(ram_addr_t addr)
1360{
Mike Dayae3a7042013-09-05 14:41:35 -04001361 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001362
Mike Dayae3a7042013-09-05 14:41:35 -04001363 /* FIXME: arch_init.c assumes that this is not called throughout
1364 * migration. Ignore the problem since hot-unplug during migration
1365 * does not work anyway.
1366 */
1367
Mike Day0dc3f442013-09-05 14:41:35 -04001368 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001369 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001370 if (block) {
1371 memset(block->idstr, 0, sizeof(block->idstr));
1372 }
Mike Day0dc3f442013-09-05 14:41:35 -04001373 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001374}
1375
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001376static int memory_try_enable_merging(void *addr, size_t len)
1377{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001378 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001379 /* disabled by the user */
1380 return 0;
1381 }
1382
1383 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1384}
1385
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001386/* Only legal before the guest might have detected the memory size: e.g. on
1387 * incoming migration, or right after reset.
1388 *
1389 * As the memory core doesn't know how memory is accessed, it is up to the
1390 * resize callback to update device state and/or add assertions to detect
1391 * misuse, if necessary.
1392 */
1393int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1394{
1395 RAMBlock *block = find_ram_block(base);
1396
1397 assert(block);
1398
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001399 newsize = TARGET_PAGE_ALIGN(newsize);
1400
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001401 if (block->used_length == newsize) {
1402 return 0;
1403 }
1404
1405 if (!(block->flags & RAM_RESIZEABLE)) {
1406 error_setg_errno(errp, EINVAL,
1407 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1408 " in != 0x" RAM_ADDR_FMT, block->idstr,
1409 newsize, block->used_length);
1410 return -EINVAL;
1411 }
1412
1413 if (block->max_length < newsize) {
1414 error_setg_errno(errp, EINVAL,
1415 "Length too large: %s: 0x" RAM_ADDR_FMT
1416 " > 0x" RAM_ADDR_FMT, block->idstr,
1417 newsize, block->max_length);
1418 return -EINVAL;
1419 }
1420
1421 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1422 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001423 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1424 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001425 memory_region_set_size(block->mr, newsize);
1426 if (block->resized) {
1427 block->resized(block->idstr, newsize, block->host);
1428 }
1429 return 0;
1430}
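/* Minimal usage sketch for qemu_ram_resize(), with invented names --
 * a device that learns the true size on incoming migration might do:
 *
 *     Error *local_err = NULL;
 *     qemu_ram_resize(block_offset, new_size, &local_err);
 *     if (local_err) {
 *         error_propagate(errp, local_err);
 *         return;
 *     }
 *
 * relying on the 'resized' callback registered at allocation time to
 * refresh any device state that mirrors the length.
 */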
1431
Hu Taoef701d72014-09-09 13:27:54 +08001432static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001433{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001434 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001435 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001436 ram_addr_t old_ram_size, new_ram_size;
1437
1438 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001439
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001440 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001441 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001442
1443 if (!new_block->host) {
1444 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001445 xen_ram_alloc(new_block->offset, new_block->max_length,
1446 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001447 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001448 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001449 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001450 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001451 error_setg_errno(errp, errno,
1452 "cannot set up guest memory '%s'",
1453 memory_region_name(new_block->mr));
1454 qemu_mutex_unlock_ramlist();
1455 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001456 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001457 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001458 }
1459 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001460
Li Zhijiandd631692015-07-02 20:18:06 +08001461 new_ram_size = MAX(old_ram_size,
1462 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1463 if (new_ram_size > old_ram_size) {
1464 migration_bitmap_extend(old_ram_size, new_ram_size);
1465 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001466 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1467 * QLIST (which has an RCU-friendly variant) does not have insertion at
1468 * tail, so save the last element in last_block.
1469 */
Mike Day0dc3f442013-09-05 14:41:35 -04001470 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001471 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001472 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001473 break;
1474 }
1475 }
1476 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001477 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001478 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001479 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001480 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001481 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001482 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001483 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001484
Mike Day0dc3f442013-09-05 14:41:35 -04001485 /* Write list before version */
1486 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001487 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001488 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001489
Juan Quintela2152f5c2013-10-08 13:52:02 +02001490 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1491
1492 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001493 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001494
1495 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001496 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1497 ram_list.dirty_memory[i] =
1498 bitmap_zero_extend(ram_list.dirty_memory[i],
1499 old_ram_size, new_ram_size);
1500 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001501 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001502 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001503 new_block->used_length,
1504 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001505
Paolo Bonzinia904c912015-01-21 16:18:35 +01001506 if (new_block->host) {
1507 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1508 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1509 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1510 if (kvm_enabled()) {
1511 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1512 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001513 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001514
1515 return new_block->offset;
1516}
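/* Ordering example for the insertion loop above (sizes invented):
 * adding blocks of max_length 4 GiB, 128 MiB and 64 KiB in any order
 * always yields the list 4 GiB -> 128 MiB -> 64 KiB. Lookups such as
 * qemu_get_ram_block() scan linearly, so the biggest blocks, which
 * cover most guest addresses, are checked first.
 */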
1517
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001518#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001519ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001520 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001521 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001522{
1523 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001524 ram_addr_t addr;
1525 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001526
1527 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001528 error_setg(errp, "-mem-path not supported with Xen");
1529 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001530 }
1531
1532 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1533 /*
1534 * file_ram_alloc() needs to allocate just like
1535 * phys_mem_alloc, but we haven't bothered to provide
1536 * a hook there.
1537 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001538 error_setg(errp,
1539 "-mem-path not supported with this accelerator");
1540 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001541 }
1542
1543 size = TARGET_PAGE_ALIGN(size);
1544 new_block = g_malloc0(sizeof(*new_block));
1545 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001546 new_block->used_length = size;
1547 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001548 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001549 new_block->host = file_ram_alloc(new_block, size,
1550 mem_path, errp);
1551 if (!new_block->host) {
1552 g_free(new_block);
1553 return -1;
1554 }
1555
Hu Taoef701d72014-09-09 13:27:54 +08001556 addr = ram_block_add(new_block, &local_err);
1557 if (local_err) {
1558 g_free(new_block);
1559 error_propagate(errp, local_err);
1560 return -1;
1561 }
1562 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001563}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001564#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001565
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001566static
1567ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1568 void (*resized)(const char*,
1569 uint64_t length,
1570 void *host),
1571 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001572 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001573{
1574 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001575 ram_addr_t addr;
1576 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577
1578 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001579 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001580 new_block = g_malloc0(sizeof(*new_block));
1581 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001582 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001583 new_block->used_length = size;
1584 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001585 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001586 new_block->fd = -1;
1587 new_block->host = host;
1588 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001589 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001590 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001591 if (resizeable) {
1592 new_block->flags |= RAM_RESIZEABLE;
1593 }
Hu Taoef701d72014-09-09 13:27:54 +08001594 addr = ram_block_add(new_block, &local_err);
1595 if (local_err) {
1596 g_free(new_block);
1597 error_propagate(errp, local_err);
1598 return -1;
1599 }
1600 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001601}
1602
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001603ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1604 MemoryRegion *mr, Error **errp)
1605{
1606 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1607}
1608
Hu Taoef701d72014-09-09 13:27:54 +08001609ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001610{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001611 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1612}
1613
1614ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1615 void (*resized)(const char*,
1616 uint64_t length,
1617 void *host),
1618 MemoryRegion *mr, Error **errp)
1619{
1620 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001621}
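/* Sketch of how the allocator family above is typically reached from
 * the memory API (call chain abridged; see memory.c for the real
 * wrappers):
 *
 *     memory_region_init_ram(...)            -> qemu_ram_alloc()
 *     memory_region_init_ram_ptr(...)        -> qemu_ram_alloc_from_ptr()
 *     memory_region_init_resizeable_ram(...) -> qemu_ram_alloc_resizeable()
 *
 * All three funnel into qemu_ram_alloc_internal(), differing only in
 * host pointer, maximum size and resize callback.
 */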
bellarde9a1ab12007-02-08 23:08:38 +00001622
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001623void qemu_ram_free_from_ptr(ram_addr_t addr)
1624{
1625 RAMBlock *block;
1626
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001627 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001628 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001629 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001630 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001631 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001632 /* Write list before version */
1633 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001634 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001635 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001636 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001637 }
1638 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001639 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001640}
1641
Paolo Bonzini43771532013-09-09 17:58:40 +02001642static void reclaim_ramblock(RAMBlock *block)
1643{
1644 if (block->flags & RAM_PREALLOC) {
1645 ;
1646 } else if (xen_enabled()) {
1647 xen_invalidate_map_cache_entry(block->host);
1648#ifndef _WIN32
1649 } else if (block->fd >= 0) {
1650 munmap(block->host, block->max_length);
1651 close(block->fd);
1652#endif
1653 } else {
1654 qemu_anon_ram_free(block->host, block->max_length);
1655 }
1656 g_free(block);
1657}
1658
Anthony Liguoric227f092009-10-01 16:12:16 -05001659void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001660{
Alex Williamson04b16652010-07-02 11:13:17 -06001661 RAMBlock *block;
1662
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001663 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001664 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001665 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001666 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001667 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001668 /* Write list before version */
1669 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001670 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001671 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001672 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001673 }
1674 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001675 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001676}
1677
Huang Yingcd19cfa2011-03-02 08:56:19 +01001678#ifndef _WIN32
1679void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1680{
1681 RAMBlock *block;
1682 ram_addr_t offset;
1683 int flags;
1684 void *area, *vaddr;
1685
Mike Day0dc3f442013-09-05 14:41:35 -04001686 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001687 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001688 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001689 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001690 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001691 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001692 } else if (xen_enabled()) {
1693 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001694 } else {
1695 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001696 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001697 flags |= (block->flags & RAM_SHARED ?
1698 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001699 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1700 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001701 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001702 /*
1703 * Remap needs to match alloc. Accelerators that
1704 * set phys_mem_alloc never remap. If they did,
1705 * we'd need a remap hook here.
1706 */
1707 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1708
Huang Yingcd19cfa2011-03-02 08:56:19 +01001709 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1710 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1711 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001712 }
1713 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001714 fprintf(stderr, "Could not remap addr: "
1715 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001716 length, addr);
1717 exit(1);
1718 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001719 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001720 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001721 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001722 }
1723 }
1724}
1725#endif /* !_WIN32 */
1726
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001727int qemu_get_ram_fd(ram_addr_t addr)
1728{
Mike Dayae3a7042013-09-05 14:41:35 -04001729 RAMBlock *block;
1730 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001731
Mike Day0dc3f442013-09-05 14:41:35 -04001732 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001733 block = qemu_get_ram_block(addr);
1734 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001735 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001736 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001737}
1738
Damjan Marion3fd74b82014-06-26 23:01:32 +02001739void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1740{
Mike Dayae3a7042013-09-05 14:41:35 -04001741 RAMBlock *block;
1742 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001743
Mike Day0dc3f442013-09-05 14:41:35 -04001744 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001745 block = qemu_get_ram_block(addr);
1746 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001747 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001748 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001749}
1750
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001751/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001752 * This should not be used for general purpose DMA. Use address_space_map
1753 * or address_space_rw instead. For local memory (e.g. video ram) that the
1754 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001755 *
1756 * By the time this function returns, the returned pointer is not protected
1757 * by RCU anymore. If the caller is not within an RCU critical section and
1758 * does not hold the iothread lock, it must have other means of protecting the
1759 * pointer, such as a reference to the region that includes the incoming
1760 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001761 */
1762void *qemu_get_ram_ptr(ram_addr_t addr)
1763{
Mike Dayae3a7042013-09-05 14:41:35 -04001764 RAMBlock *block;
1765 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001766
Mike Day0dc3f442013-09-05 14:41:35 -04001767 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001768 block = qemu_get_ram_block(addr);
1769
1770 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001771 /* We need to check if the requested address is in the RAM
1772 * because we don't want to map the whole guest memory in QEMU.
1773 * In that case, just map up to the end of the page.
1774 */
1775 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001776 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001777 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001778 }
Mike Dayae3a7042013-09-05 14:41:35 -04001779
1780 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001781 }
Mike Dayae3a7042013-09-05 14:41:35 -04001782 ptr = ramblock_ptr(block, addr - block->offset);
1783
Mike Day0dc3f442013-09-05 14:41:35 -04001784unlock:
1785 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001786 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001787}
1788
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001789/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001790 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001791 *
1792 * By the time this function returns, the returned pointer is not protected
1793 * by RCU anymore. If the caller is not within an RCU critical section and
1794 * does not hold the iothread lock, it must have other means of protecting the
1795 * pointer, such as a reference to the region that includes the incoming
1796 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001797 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001798static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001799{
Mike Dayae3a7042013-09-05 14:41:35 -04001800 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001801 if (*size == 0) {
1802 return NULL;
1803 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001804 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001805 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001806 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001807 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001808 rcu_read_lock();
1809 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001810 if (addr - block->offset < block->max_length) {
1811 if (addr - block->offset + *size > block->max_length)
1812 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001813 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001814 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001815 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001816 }
1817 }
1818
1819 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1820 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001821 }
1822}
1823
Paolo Bonzini7443b432013-06-03 12:44:02 +02001824/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001825 * (typically a TLB entry) back to a ram offset.
1826 *
1827 * By the time this function returns, the returned pointer is not protected
1828 * by RCU anymore. If the caller is not within an RCU critical section and
1829 * does not hold the iothread lock, it must have other means of protecting the
1830 * pointer, such as a reference to the region that includes the incoming
1831 * ram_addr_t.
1832 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001833MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001834{
pbrook94a6b542009-04-11 17:15:54 +00001835 RAMBlock *block;
1836 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001837 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001838
Jan Kiszka868bb332011-06-21 22:59:09 +02001839 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001840 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001841 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001842 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001843 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001844 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001845 }
1846
Mike Day0dc3f442013-09-05 14:41:35 -04001847 rcu_read_lock();
1848 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001849 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001850 goto found;
1851 }
1852
Mike Day0dc3f442013-09-05 14:41:35 -04001853 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001854 /* This case happens when the block is not mapped. */
1855 if (block->host == NULL) {
1856 continue;
1857 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001858 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001859 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001860 }
pbrook94a6b542009-04-11 17:15:54 +00001861 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001862
Mike Day0dc3f442013-09-05 14:41:35 -04001863 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001864 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001865
1866found:
1867 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001868 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001869 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001870 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001871}
Alex Williamsonf471a172010-06-11 11:11:42 -06001872
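/* Usage sketch (names invented): code holding only a host pointer,
 * e.g. a TLB entry's host address, recovers the RAM offset like this:
 *
 *     ram_addr_t ram_addr;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(ptr, &ram_addr);
 *     if (mr == NULL) {
 *         // not guest RAM at all
 *     }
 *
 * The mru_block check above makes repeated hits in the same block,
 * typically main RAM, O(1).
 */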
Avi Kivitya8170e52012-10-23 12:30:10 +02001873static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001874 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001875{
Juan Quintela52159192013-10-08 12:44:04 +02001876 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001877 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001878 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001879 switch (size) {
1880 case 1:
1881 stb_p(qemu_get_ram_ptr(ram_addr), val);
1882 break;
1883 case 2:
1884 stw_p(qemu_get_ram_ptr(ram_addr), val);
1885 break;
1886 case 4:
1887 stl_p(qemu_get_ram_ptr(ram_addr), val);
1888 break;
1889 default:
1890 abort();
1891 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001892 /* Set both VGA and migration bits for simplicity and to remove
1893 * the notdirty callback faster.
1894 */
1895 cpu_physical_memory_set_dirty_range(ram_addr, size,
1896 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001897 /* we remove the notdirty callback only if the code has been
1898 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001899 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001900 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001901 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001902 }
bellard1ccde1c2004-02-06 19:46:14 +00001903}
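/* This is the write side of dirty tracking: pages with a clean dirty
 * bitmap are mapped through this notdirty region, so the first store
 * invalidates any TBs translated from the page, performs the write,
 * and marks the page dirty for VGA and migration. The TLB entry is
 * switched back to a plain RAM mapping only once the page is dirty
 * for all clients including DIRTY_MEMORY_CODE, i.e. once the
 * translated code has been flushed.
 */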
1904
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001905static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1906 unsigned size, bool is_write)
1907{
1908 return is_write;
1909}
1910
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001911static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001912 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001913 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001914 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001915};
1916
pbrook0f459d12008-06-09 00:20:13 +00001917/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001918static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001919{
Andreas Färber93afead2013-08-26 03:41:01 +02001920 CPUState *cpu = current_cpu;
1921 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001922 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001923 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001924 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001925 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001926
Andreas Färberff4700b2013-08-26 18:23:18 +02001927 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001928 /* We re-entered the check after replacing the TB. Now raise
1929 * the debug interrupt so that it will trigger after the
1930 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001931 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001932 return;
1933 }
Andreas Färber93afead2013-08-26 03:41:01 +02001934 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001935 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001936 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1937 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001938 if (flags == BP_MEM_READ) {
1939 wp->flags |= BP_WATCHPOINT_HIT_READ;
1940 } else {
1941 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1942 }
1943 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001944 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001945 if (!cpu->watchpoint_hit) {
1946 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001947 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001948 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001949 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001950 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001951 } else {
1952 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001953 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001954 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001955 }
aliguori06d55cc2008-11-18 20:24:06 +00001956 }
aliguori6e140f22008-11-18 20:37:55 +00001957 } else {
1958 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001959 }
1960 }
1961}
1962
pbrook6658ffb2007-03-16 23:58:11 +00001963/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1964 so these check for a hit then pass through to the normal out-of-line
1965 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001966static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1967 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001968{
Peter Maydell66b9b432015-04-26 16:49:24 +01001969 MemTxResult res;
1970 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001971
Peter Maydell66b9b432015-04-26 16:49:24 +01001972 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001973 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001974 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01001975 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001976 break;
1977 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01001978 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001979 break;
1980 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01001981 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001982 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001983 default: abort();
1984 }
Peter Maydell66b9b432015-04-26 16:49:24 +01001985 *pdata = data;
1986 return res;
1987}
1988
1989static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1990 uint64_t val, unsigned size,
1991 MemTxAttrs attrs)
1992{
1993 MemTxResult res;
1994
1995 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1996 switch (size) {
1997 case 1:
1998 address_space_stb(&address_space_memory, addr, val, attrs, &res);
1999 break;
2000 case 2:
2001 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2002 break;
2003 case 4:
2004 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2005 break;
2006 default: abort();
2007 }
2008 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002009}
2010
Avi Kivity1ec9b902012-01-02 12:47:48 +02002011static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002012 .read_with_attrs = watch_mem_read,
2013 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002014 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002015};
pbrook6658ffb2007-03-16 23:58:11 +00002016
Peter Maydellf25a49e2015-04-26 16:49:24 +01002017static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2018 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002019{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002020 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002021 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002022 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002023
blueswir1db7b5422007-05-26 17:36:03 +00002024#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002025 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002026 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002027#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002028 res = address_space_read(subpage->as, addr + subpage->base,
2029 attrs, buf, len);
2030 if (res) {
2031 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002032 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002033 switch (len) {
2034 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002035 *data = ldub_p(buf);
2036 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002037 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002038 *data = lduw_p(buf);
2039 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002040 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002041 *data = ldl_p(buf);
2042 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002043 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002044 *data = ldq_p(buf);
2045 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002046 default:
2047 abort();
2048 }
blueswir1db7b5422007-05-26 17:36:03 +00002049}
2050
Peter Maydellf25a49e2015-04-26 16:49:24 +01002051static MemTxResult subpage_write(void *opaque, hwaddr addr,
2052 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002053{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002054 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002055 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002056
blueswir1db7b5422007-05-26 17:36:03 +00002057#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002058 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002059 " value %"PRIx64"\n",
2060 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002061#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002062 switch (len) {
2063 case 1:
2064 stb_p(buf, value);
2065 break;
2066 case 2:
2067 stw_p(buf, value);
2068 break;
2069 case 4:
2070 stl_p(buf, value);
2071 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002072 case 8:
2073 stq_p(buf, value);
2074 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002075 default:
2076 abort();
2077 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002078 return address_space_write(subpage->as, addr + subpage->base,
2079 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002080}
2081
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002082static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002083 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002084{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002085 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002086#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002087 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002088 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002089#endif
2090
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002091 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002092 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002093}
2094
Avi Kivity70c68e42012-01-02 12:32:48 +02002095static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002096 .read_with_attrs = subpage_read,
2097 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002098 .impl.min_access_size = 1,
2099 .impl.max_access_size = 8,
2100 .valid.min_access_size = 1,
2101 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002102 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002103 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002104};
2105
Anthony Liguoric227f092009-10-01 16:12:16 -05002106static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002107 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002108{
2109 int idx, eidx;
2110
2111 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2112 return -1;
2113 idx = SUBPAGE_IDX(start);
2114 eidx = SUBPAGE_IDX(end);
2115#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002116 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2117 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002118#endif
blueswir1db7b5422007-05-26 17:36:03 +00002119 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002120 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002121 }
2122
2123 return 0;
2124}
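/* Example with invented numbers: on a 4 KiB target page, registering
 * section 5 for the byte range [0x100, 0x4ff] stores 5 into
 * mmio->sub_section[0x100] through mmio->sub_section[0x4ff], since
 * SUBPAGE_IDX() indexes at byte granularity. The rest of the page
 * keeps whatever section it had, initially PHYS_SECTION_UNASSIGNED.
 */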
2125
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002126static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002127{
Anthony Liguoric227f092009-10-01 16:12:16 -05002128 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002129
Anthony Liguori7267c092011-08-20 22:09:37 -05002130 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002131
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002132 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002133 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002134 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002135 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002136 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002137#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002138 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2139 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002140#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002141 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002142
2143 return mmio;
2144}
2145
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002146static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2147 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002148{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002149 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002150 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002151 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002152 .mr = mr,
2153 .offset_within_address_space = 0,
2154 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002155 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002156 };
2157
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002158 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002159}
2160
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002161MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002162{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002163 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2164 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002165
2166 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002167}
2168
Avi Kivitye9179ce2009-06-14 11:38:52 +03002169static void io_mem_init(void)
2170{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002171 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002172 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002173 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002174 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002175 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002176 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002177 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002178}
2179
Avi Kivityac1970f2012-10-03 16:22:53 +02002180static void mem_begin(MemoryListener *listener)
2181{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002182 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002183 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2184 uint16_t n;
2185
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002186 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002187 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002188 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002189 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002190 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002191 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002192 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002193 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002194
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002195 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002196 d->as = as;
2197 as->next_dispatch = d;
2198}
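/* The four dummy sections above must be added in exactly this order:
 * the asserts pin them to the fixed indices PHYS_SECTION_UNASSIGNED,
 * PHYS_SECTION_NOTDIRTY, PHYS_SECTION_ROM and PHYS_SECTION_WATCH that
 * the TLB code encodes directly into iotlb values, so every fresh
 * dispatch table starts with the same layout.
 */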
2199
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002200static void address_space_dispatch_free(AddressSpaceDispatch *d)
2201{
2202 phys_sections_free(&d->map);
2203 g_free(d);
2204}
2205
Paolo Bonzini00752702013-05-29 12:13:54 +02002206static void mem_commit(MemoryListener *listener)
2207{
2208 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002209 AddressSpaceDispatch *cur = as->dispatch;
2210 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002211
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002212 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002213
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002214 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002215 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002216 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002217 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002218}
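/* The commit above is the classic RCU publish pattern: build the new
 * dispatch table completely, publish it with atomic_rcu_set(), and
 * defer freeing the old one with call_rcu() until all readers are
 * done. A sketch of the reader side (lookup helper abridged):
 *
 *     rcu_read_lock();
 *     d = atomic_rcu_read(&as->dispatch);
 *     section = ... lookup in d->phys_map ...;
 *     rcu_read_unlock();
 */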
2219
Avi Kivity1d711482012-10-02 18:54:45 +02002220static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002221{
Andreas Färber182735e2013-05-29 22:29:20 +02002222 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002223
2224 /* since each CPU stores ram addresses in its TLB cache, we must
2225 reset the modified entries */
2226 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002227 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002228 /* FIXME: Disentangle the cpu.h circular file deps so we can
2229 directly get the right CPU from listener. */
2230 if (cpu->tcg_as_listener != listener) {
2231 continue;
2232 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002233 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002234 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002235}
2236
Avi Kivityac1970f2012-10-03 16:22:53 +02002237void address_space_init_dispatch(AddressSpace *as)
2238{
Paolo Bonzini00752702013-05-29 12:13:54 +02002239 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002240 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002241 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002242 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002243 .region_add = mem_add,
2244 .region_nop = mem_add,
2245 .priority = 0,
2246 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002247 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002248}
2249
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002250void address_space_unregister(AddressSpace *as)
2251{
2252 memory_listener_unregister(&as->dispatch_listener);
2253}
2254
Avi Kivity83f3c252012-10-07 12:59:55 +02002255void address_space_destroy_dispatch(AddressSpace *as)
2256{
2257 AddressSpaceDispatch *d = as->dispatch;
2258
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002259 atomic_rcu_set(&as->dispatch, NULL);
2260 if (d) {
2261 call_rcu(d, address_space_dispatch_free, rcu);
2262 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002263}
2264
Avi Kivity62152b82011-07-26 14:26:14 +03002265static void memory_map_init(void)
2266{
Anthony Liguori7267c092011-08-20 22:09:37 -05002267 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002268
Paolo Bonzini57271d62013-11-07 17:14:37 +01002269 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002270 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002271
Anthony Liguori7267c092011-08-20 22:09:37 -05002272 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002273 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2274 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002275 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002276}
2277
2278MemoryRegion *get_system_memory(void)
2279{
2280 return system_memory;
2281}
2282
Avi Kivity309cb472011-08-08 16:09:03 +03002283MemoryRegion *get_system_io(void)
2284{
2285 return system_io;
2286}
2287
pbrooke2eef172008-06-08 01:09:01 +00002288#endif /* !defined(CONFIG_USER_ONLY) */
2289
bellard13eb76e2004-01-24 15:23:36 +00002290/* physical memory access (slow version, mainly for debug) */
2291#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002292int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002293 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002294{
2295 int l, flags;
2296 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002297    void *p;
bellard13eb76e2004-01-24 15:23:36 +00002298
2299 while (len > 0) {
2300 page = addr & TARGET_PAGE_MASK;
2301 l = (page + TARGET_PAGE_SIZE) - addr;
2302 if (l > len)
2303 l = len;
2304 flags = page_get_flags(page);
2305 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002306 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002307 if (is_write) {
2308 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002309 return -1;
bellard579a97f2007-11-11 14:26:47 +00002310 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002311 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002312 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002313 memcpy(p, buf, l);
2314 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002315 } else {
2316 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002317 return -1;
bellard579a97f2007-11-11 14:26:47 +00002318 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002319 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002320 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002321 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002322 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002323 }
2324 len -= l;
2325 buf += l;
2326 addr += l;
2327 }
Paul Brooka68fe892010-03-01 00:08:59 +00002328 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002329}
bellard8df1cd02005-01-28 22:37:22 +00002330
bellard13eb76e2004-01-24 15:23:36 +00002331#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002332
Paolo Bonzini845b6212015-03-23 11:45:53 +01002333static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002334 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002335{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002336 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2337 /* No early return if dirty_log_mask is or becomes 0, because
2338 * cpu_physical_memory_set_dirty_range will still call
2339 * xen_modified_memory.
2340 */
2341 if (dirty_log_mask) {
2342 dirty_log_mask =
2343 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002344 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002345 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2346 tb_invalidate_phys_range(addr, addr + length);
2347 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2348 }
2349 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002350}
2351
Richard Henderson23326162013-07-08 14:55:59 -07002352static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002353{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002354 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002355
2356 /* Regions are assumed to support 1-4 byte accesses unless
2357 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002358 if (access_size_max == 0) {
2359 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002360 }
Richard Henderson23326162013-07-08 14:55:59 -07002361
2362 /* Bound the maximum access by the alignment of the address. */
2363 if (!mr->ops->impl.unaligned) {
2364 unsigned align_size_max = addr & -addr;
2365 if (align_size_max != 0 && align_size_max < access_size_max) {
2366 access_size_max = align_size_max;
2367 }
2368 }
2369
2370 /* Don't attempt accesses larger than the maximum. */
2371 if (l > access_size_max) {
2372 l = access_size_max;
2373 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02002374 if (l & (l - 1)) {
2375 l = 1 << (qemu_fls(l) - 1);
2376 }
Richard Henderson23326162013-07-08 14:55:59 -07002377
2378 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002379}
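
/*
 * Worked example (hypothetical values): for an 8-byte request at
 * addr == 0x1002 against a region with valid.max_access_size == 4 and no
 * unaligned support, access_size_max starts at 4, is reduced to 2 by the
 * address alignment (0x1002 & -0x1002 == 2), and l is clamped from 8 to 2.
 * A non-power-of-two residue such as l == 6 (with no tighter limit) is
 * rounded down to 4 by the qemu_fls() step.
 */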
2380
Jan Kiszka4840f102015-06-18 18:47:22 +02002381static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002382{
Jan Kiszka4840f102015-06-18 18:47:22 +02002383 bool unlocked = !qemu_mutex_iothread_locked();
2384 bool release_lock = false;
2385
2386 if (unlocked && mr->global_locking) {
2387 qemu_mutex_lock_iothread();
2388 unlocked = false;
2389 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002390 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002391 if (mr->flush_coalesced_mmio) {
2392 if (unlocked) {
2393 qemu_mutex_lock_iothread();
2394 }
2395 qemu_flush_coalesced_mmio_buffer();
2396 if (unlocked) {
2397 qemu_mutex_unlock_iothread();
2398 }
2399 }
2400
2401 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002402}
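
/*
 * Usage sketch: every MMIO dispatch in this file follows the same pattern
 * (mr, addr1, val, attrs and r stand for the caller's locals): OR the
 * return value into a caller-local flag and drop the iothread lock once at
 * the end of the access.
 *
 *     bool release_lock = false;
 *     ...
 *     if (!memory_access_is_direct(mr, false)) {
 *         release_lock |= prepare_mmio_access(mr);
 *         r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
 *     }
 *     ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */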
2403
Peter Maydell5c9eb022015-04-26 16:49:24 +01002404MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2405 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002406{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002407 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002408 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002409 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002410 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002411 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002412 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002413 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002414
Paolo Bonzini41063e12015-03-18 14:21:43 +01002415 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002416 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002417 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002418 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002419
bellard13eb76e2004-01-24 15:23:36 +00002420 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002421 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002422 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002423 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002424 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002425 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002426 switch (l) {
2427 case 8:
2428 /* 64 bit write access */
2429 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002430 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2431 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002432 break;
2433 case 4:
bellard1c213d12005-09-03 10:49:04 +00002434 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002435 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002436 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2437 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002438 break;
2439 case 2:
bellard1c213d12005-09-03 10:49:04 +00002440 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002441 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002442 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2443 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002444 break;
2445 case 1:
bellard1c213d12005-09-03 10:49:04 +00002446 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002447 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002448 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2449 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002450 break;
2451 default:
2452 abort();
bellard13eb76e2004-01-24 15:23:36 +00002453 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002454 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002455 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002456 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002457 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002458 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002459 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002460 }
2461 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002462 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002463 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002464 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002465 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002466 switch (l) {
2467 case 8:
2468 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002469 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2470 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002471 stq_p(buf, val);
2472 break;
2473 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002474 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002475 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2476 attrs);
bellardc27004e2005-01-03 23:35:10 +00002477 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002478 break;
2479 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002480 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002481 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2482 attrs);
bellardc27004e2005-01-03 23:35:10 +00002483 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002484 break;
2485 case 1:
bellard1c213d12005-09-03 10:49:04 +00002486 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002487 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2488 attrs);
bellardc27004e2005-01-03 23:35:10 +00002489 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002490 break;
2491 default:
2492 abort();
bellard13eb76e2004-01-24 15:23:36 +00002493 }
2494 } else {
2495 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002496 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002497 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002498 }
2499 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002500
2501 if (release_lock) {
2502 qemu_mutex_unlock_iothread();
2503 release_lock = false;
2504 }
2505
bellard13eb76e2004-01-24 15:23:36 +00002506 len -= l;
2507 buf += l;
2508 addr += l;
2509 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002510 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002511
Peter Maydell3b643492015-04-26 16:49:23 +01002512 return result;
bellard13eb76e2004-01-24 15:23:36 +00002513}
bellard8df1cd02005-01-28 22:37:22 +00002514
Peter Maydell5c9eb022015-04-26 16:49:24 +01002515MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2516 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002517{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002518 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002519}
2520
Peter Maydell5c9eb022015-04-26 16:49:24 +01002521MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2522 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002523{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002524 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002525}
2526
Avi Kivitya8170e52012-10-23 12:30:10 +02002528void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002529 int len, int is_write)
2530{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002531 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2532 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002533}
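
/*
 * Example (sketch, hypothetical address and payload): a checked write into
 * guest memory for callers that care about the transaction result; the
 * wrapper above simply discards it.
 *
 *     uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     MemTxResult r = address_space_write(&address_space_memory, 0x1000,
 *                                         MEMTXATTRS_UNSPECIFIED, payload,
 *                                         sizeof(payload));
 *     if (r != MEMTX_OK) {
 *         qemu_log("guest write failed\n");
 *     }
 */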
2534
Alexander Graf582b55a2013-12-11 14:17:44 +01002535enum write_rom_type {
2536 WRITE_DATA,
2537 FLUSH_CACHE,
2538};
2539
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002540static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002541 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002542{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002543 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002544 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002545 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002546 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002547
Paolo Bonzini41063e12015-03-18 14:21:43 +01002548 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002549 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002550 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002551 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002552
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002553 if (!(memory_region_is_ram(mr) ||
2554 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002555 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002556 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002557 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002558 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002559 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002560 switch (type) {
2561 case WRITE_DATA:
2562 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002563 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002564 break;
2565 case FLUSH_CACHE:
2566 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2567 break;
2568 }
bellardd0ecd2a2006-04-23 17:14:48 +00002569 }
2570 len -= l;
2571 buf += l;
2572 addr += l;
2573 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002574 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002575}
2576
Alexander Graf582b55a2013-12-11 14:17:44 +01002577/* Used for ROM loading: can write to both RAM and ROM. */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002578void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002579 const uint8_t *buf, int len)
2580{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002581 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002582}
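
/*
 * Example (sketch): how a board model might copy a firmware image into a
 * ROM region; rom_data and rom_size are hypothetical.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
 *                                   rom_data, rom_size);
 */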
2583
2584void cpu_flush_icache_range(hwaddr start, int len)
2585{
2586 /*
2587 * This function should do the same thing as an icache flush that was
2588 * triggered from within the guest. For TCG we are always cache coherent,
2589 * so there is no need to flush anything. For KVM / Xen we need to flush
2590 * the host's instruction cache at least.
2591 */
2592 if (tcg_enabled()) {
2593 return;
2594 }
2595
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002596 cpu_physical_memory_write_rom_internal(&address_space_memory,
2597 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002598}
2599
aliguori6d16c2f2009-01-22 16:59:11 +00002600typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002601 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002602 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002603 hwaddr addr;
2604 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002605 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002606} BounceBuffer;
2607
2608static BounceBuffer bounce;
2609
aliguoriba223c22009-01-22 16:59:16 +00002610typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002611 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002612 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002613} MapClient;
2614
Fam Zheng38e047b2015-03-16 17:03:35 +08002615QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002616static QLIST_HEAD(map_client_list, MapClient) map_client_list
2617 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002618
Fam Zhenge95205e2015-03-16 17:03:37 +08002619static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002620{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002621 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002622 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002623}
2624
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002625static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002626{
2627 MapClient *client;
2628
Blue Swirl72cf2d42009-09-12 07:36:22 +00002629 while (!QLIST_EMPTY(&map_client_list)) {
2630 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002631 qemu_bh_schedule(client->bh);
2632 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002633 }
2634}
2635
Fam Zhenge95205e2015-03-16 17:03:37 +08002636void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002637{
2638 MapClient *client = g_malloc(sizeof(*client));
2639
Fam Zheng38e047b2015-03-16 17:03:35 +08002640 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002641 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002642 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002643 if (!atomic_read(&bounce.in_use)) {
2644 cpu_notify_map_clients_locked();
2645 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002646 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002647}
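
/*
 * Example (sketch): retrying a mapping once the bounce buffer is free.  The
 * device state, bottom half and continue_dma() helper are hypothetical.
 *
 *     static void retry_map(void *opaque)
 *     {
 *         MyDMAState *s = opaque;
 *         continue_dma(s);           // calls address_space_map() again
 *     }
 *
 *     ptr = address_space_map(as, addr, &plen, true);
 *     if (!ptr) {
 *         s->map_bh = qemu_bh_new(retry_map, s);
 *         cpu_register_map_client(s->map_bh);
 *     }
 */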
2648
Fam Zheng38e047b2015-03-16 17:03:35 +08002649void cpu_exec_init_all(void)
2650{
2651 qemu_mutex_init(&ram_list.mutex);
2652 memory_map_init();
2653 io_mem_init();
2654 qemu_mutex_init(&map_client_list_lock);
2655}
2656
Fam Zhenge95205e2015-03-16 17:03:37 +08002657void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002658{
Fam Zhenge95205e2015-03-16 17:03:37 +08002659 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002660
Fam Zhenge95205e2015-03-16 17:03:37 +08002661 qemu_mutex_lock(&map_client_list_lock);
2662 QLIST_FOREACH(client, &map_client_list, link) {
2663 if (client->bh == bh) {
2664 cpu_unregister_map_client_do(client);
2665 break;
2666 }
2667 }
2668 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002669}
2670
2671static void cpu_notify_map_clients(void)
2672{
Fam Zheng38e047b2015-03-16 17:03:35 +08002673 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002674 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002675 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002676}
2677
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002678bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2679{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002680 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002681 hwaddr l, xlat;
2682
Paolo Bonzini41063e12015-03-18 14:21:43 +01002683 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002684 while (len > 0) {
2685 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002686 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2687 if (!memory_access_is_direct(mr, is_write)) {
2688 l = memory_access_size(mr, l, addr);
2689 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002690                return false;
2691 }
2692 }
2693
2694 len -= l;
2695 addr += l;
2696 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002697 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002698 return true;
2699}
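
/*
 * Example (sketch): probing a range before committing to an access; as,
 * addr and buf are the caller's (hypothetical) locals.
 *
 *     if (address_space_access_valid(as, addr, 512, false)) {
 *         address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, buf, 512);
 *     }
 */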
2700
aliguori6d16c2f2009-01-22 16:59:11 +00002701/* Map a physical memory region into a host virtual address.
2702 * May map a subset of the requested range, given by and returned in *plen.
2703 * May return NULL if resources needed to perform the mapping are exhausted.
2704 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002705 * Use cpu_register_map_client() to know when retrying the map operation is
2706 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002707 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002708void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002709 hwaddr addr,
2710 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002711 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002712{
Avi Kivitya8170e52012-10-23 12:30:10 +02002713 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002714 hwaddr done = 0;
2715 hwaddr l, xlat, base;
2716 MemoryRegion *mr, *this_mr;
2717 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002718
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002719 if (len == 0) {
2720 return NULL;
2721 }
aliguori6d16c2f2009-01-22 16:59:11 +00002722
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002723 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002724 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002725 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002726
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002727 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002728 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002729 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002730 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002731 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002732 /* Avoid unbounded allocations */
2733 l = MIN(l, TARGET_PAGE_SIZE);
2734 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002735 bounce.addr = addr;
2736 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002737
2738 memory_region_ref(mr);
2739 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002740 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002741 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2742 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002743 }
aliguori6d16c2f2009-01-22 16:59:11 +00002744
Paolo Bonzini41063e12015-03-18 14:21:43 +01002745 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002746 *plen = l;
2747 return bounce.buffer;
2748 }
2749
2750 base = xlat;
2751 raddr = memory_region_get_ram_addr(mr);
2752
2753 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002754 len -= l;
2755 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002756 done += l;
2757 if (len == 0) {
2758 break;
2759 }
2760
2761 l = len;
2762 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2763 if (this_mr != mr || xlat != base + done) {
2764 break;
2765 }
aliguori6d16c2f2009-01-22 16:59:11 +00002766 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002767
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002768 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002769 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002770 *plen = done;
2771 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002772}
2773
Avi Kivityac1970f2012-10-03 16:22:53 +02002774/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002775 * Will also mark the memory as dirty if is_write == 1. access_len gives
2776 * the amount of memory that was actually read or written by the caller.
2777 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002778void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2779 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002780{
2781 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002782 MemoryRegion *mr;
2783 ram_addr_t addr1;
2784
2785 mr = qemu_ram_addr_from_host(buffer, &addr1);
2786 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002787 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002788 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002789 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002790 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002791 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002792 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002793 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002794 return;
2795 }
2796 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002797 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2798 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002799 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002800 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002801 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002802 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002803 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002804 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002805}
bellardd0ecd2a2006-04-23 17:14:48 +00002806
Avi Kivitya8170e52012-10-23 12:30:10 +02002807void *cpu_physical_memory_map(hwaddr addr,
2808 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002809 int is_write)
2810{
2811 return address_space_map(&address_space_memory, addr, plen, is_write);
2812}
2813
Avi Kivitya8170e52012-10-23 12:30:10 +02002814void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2815 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002816{
2817 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2818}
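
/*
 * Example (sketch, hypothetical guest address): a bounded zero-fill through
 * the mapping API.  On return *plen may be smaller than requested, and the
 * length actually written is reported back through access_len.
 *
 *     hwaddr plen = 4096;
 *     void *host = cpu_physical_memory_map(0x200000, &plen, 1);
 *     if (host) {
 *         memset(host, 0, plen);
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 */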
2819
bellard8df1cd02005-01-28 22:37:22 +00002820/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002821static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2822 MemTxAttrs attrs,
2823 MemTxResult *result,
2824 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002825{
bellard8df1cd02005-01-28 22:37:22 +00002826 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002827 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002828 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002829 hwaddr l = 4;
2830 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002831 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002832 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002833
Paolo Bonzini41063e12015-03-18 14:21:43 +01002834 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002835 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002836 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002837 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002838
bellard8df1cd02005-01-28 22:37:22 +00002839 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002840 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002841#if defined(TARGET_WORDS_BIGENDIAN)
2842 if (endian == DEVICE_LITTLE_ENDIAN) {
2843 val = bswap32(val);
2844 }
2845#else
2846 if (endian == DEVICE_BIG_ENDIAN) {
2847 val = bswap32(val);
2848 }
2849#endif
bellard8df1cd02005-01-28 22:37:22 +00002850 } else {
2851 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002852 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002853 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002854 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002855 switch (endian) {
2856 case DEVICE_LITTLE_ENDIAN:
2857 val = ldl_le_p(ptr);
2858 break;
2859 case DEVICE_BIG_ENDIAN:
2860 val = ldl_be_p(ptr);
2861 break;
2862 default:
2863 val = ldl_p(ptr);
2864 break;
2865 }
Peter Maydell50013112015-04-26 16:49:24 +01002866 r = MEMTX_OK;
2867 }
2868 if (result) {
2869 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002870 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002871 if (release_lock) {
2872 qemu_mutex_unlock_iothread();
2873 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002874 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002875 return val;
2876}
2877
Peter Maydell50013112015-04-26 16:49:24 +01002878uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2879 MemTxAttrs attrs, MemTxResult *result)
2880{
2881 return address_space_ldl_internal(as, addr, attrs, result,
2882 DEVICE_NATIVE_ENDIAN);
2883}
2884
2885uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2886 MemTxAttrs attrs, MemTxResult *result)
2887{
2888 return address_space_ldl_internal(as, addr, attrs, result,
2889 DEVICE_LITTLE_ENDIAN);
2890}
2891
2892uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2893 MemTxAttrs attrs, MemTxResult *result)
2894{
2895 return address_space_ldl_internal(as, addr, attrs, result,
2896 DEVICE_BIG_ENDIAN);
2897}
2898
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002899uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002900{
Peter Maydell50013112015-04-26 16:49:24 +01002901 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002902}
2903
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002904uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002905{
Peter Maydell50013112015-04-26 16:49:24 +01002906 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002907}
2908
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002909uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002910{
Peter Maydell50013112015-04-26 16:49:24 +01002911 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002912}
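
/*
 * Example (sketch, hypothetical address): a checked little-endian 32-bit
 * load; the _phys wrappers above discard the MemTxResult instead.
 *
 *     MemTxResult r;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, 0x8000,
 *                                       MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         v = 0xffffffff;            // hypothetical bus-error value
 *     }
 */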
2913
bellard84b7b8e2005-11-28 21:19:04 +00002914/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002915static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2916 MemTxAttrs attrs,
2917 MemTxResult *result,
2918 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002919{
bellard84b7b8e2005-11-28 21:19:04 +00002920 uint8_t *ptr;
2921 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002922 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002923 hwaddr l = 8;
2924 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002925 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002926 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002927
Paolo Bonzini41063e12015-03-18 14:21:43 +01002928 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002929 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002930 false);
2931 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002932 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002933
bellard84b7b8e2005-11-28 21:19:04 +00002934 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002935 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002936#if defined(TARGET_WORDS_BIGENDIAN)
2937 if (endian == DEVICE_LITTLE_ENDIAN) {
2938 val = bswap64(val);
2939 }
2940#else
2941 if (endian == DEVICE_BIG_ENDIAN) {
2942 val = bswap64(val);
2943 }
2944#endif
bellard84b7b8e2005-11-28 21:19:04 +00002945 } else {
2946 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002947 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002948 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002949 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002950 switch (endian) {
2951 case DEVICE_LITTLE_ENDIAN:
2952 val = ldq_le_p(ptr);
2953 break;
2954 case DEVICE_BIG_ENDIAN:
2955 val = ldq_be_p(ptr);
2956 break;
2957 default:
2958 val = ldq_p(ptr);
2959 break;
2960 }
Peter Maydell50013112015-04-26 16:49:24 +01002961 r = MEMTX_OK;
2962 }
2963 if (result) {
2964 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00002965 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002966 if (release_lock) {
2967 qemu_mutex_unlock_iothread();
2968 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002969 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00002970 return val;
2971}
2972
Peter Maydell50013112015-04-26 16:49:24 +01002973uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2974 MemTxAttrs attrs, MemTxResult *result)
2975{
2976 return address_space_ldq_internal(as, addr, attrs, result,
2977 DEVICE_NATIVE_ENDIAN);
2978}
2979
2980uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2981 MemTxAttrs attrs, MemTxResult *result)
2982{
2983 return address_space_ldq_internal(as, addr, attrs, result,
2984 DEVICE_LITTLE_ENDIAN);
2985}
2986
2987uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2988 MemTxAttrs attrs, MemTxResult *result)
2989{
2990 return address_space_ldq_internal(as, addr, attrs, result,
2991 DEVICE_BIG_ENDIAN);
2992}
2993
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002994uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002995{
Peter Maydell50013112015-04-26 16:49:24 +01002996 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002997}
2998
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002999uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003000{
Peter Maydell50013112015-04-26 16:49:24 +01003001 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003002}
3003
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003004uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003005{
Peter Maydell50013112015-04-26 16:49:24 +01003006 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003007}
3008
bellardaab33092005-10-30 20:48:42 +00003009/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003010uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3011 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003012{
3013 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003014 MemTxResult r;
3015
3016 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3017 if (result) {
3018 *result = r;
3019 }
bellardaab33092005-10-30 20:48:42 +00003020 return val;
3021}
3022
Peter Maydell50013112015-04-26 16:49:24 +01003023uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3024{
3025 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3026}
3027
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003028/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003029static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3030 hwaddr addr,
3031 MemTxAttrs attrs,
3032 MemTxResult *result,
3033 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003034{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003035 uint8_t *ptr;
3036 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003037 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003038 hwaddr l = 2;
3039 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003040 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003041 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003042
Paolo Bonzini41063e12015-03-18 14:21:43 +01003043 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003044 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003045 false);
3046 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003047 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003048
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003049 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003050 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003051#if defined(TARGET_WORDS_BIGENDIAN)
3052 if (endian == DEVICE_LITTLE_ENDIAN) {
3053 val = bswap16(val);
3054 }
3055#else
3056 if (endian == DEVICE_BIG_ENDIAN) {
3057 val = bswap16(val);
3058 }
3059#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003060 } else {
3061 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003062 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003063 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003064 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003065 switch (endian) {
3066 case DEVICE_LITTLE_ENDIAN:
3067 val = lduw_le_p(ptr);
3068 break;
3069 case DEVICE_BIG_ENDIAN:
3070 val = lduw_be_p(ptr);
3071 break;
3072 default:
3073 val = lduw_p(ptr);
3074 break;
3075 }
Peter Maydell50013112015-04-26 16:49:24 +01003076 r = MEMTX_OK;
3077 }
3078 if (result) {
3079 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003080 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003081 if (release_lock) {
3082 qemu_mutex_unlock_iothread();
3083 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003084 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003085 return val;
bellardaab33092005-10-30 20:48:42 +00003086}
3087
Peter Maydell50013112015-04-26 16:49:24 +01003088uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3089 MemTxAttrs attrs, MemTxResult *result)
3090{
3091 return address_space_lduw_internal(as, addr, attrs, result,
3092 DEVICE_NATIVE_ENDIAN);
3093}
3094
3095uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3096 MemTxAttrs attrs, MemTxResult *result)
3097{
3098 return address_space_lduw_internal(as, addr, attrs, result,
3099 DEVICE_LITTLE_ENDIAN);
3100}
3101
3102uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3103 MemTxAttrs attrs, MemTxResult *result)
3104{
3105 return address_space_lduw_internal(as, addr, attrs, result,
3106 DEVICE_BIG_ENDIAN);
3107}
3108
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003109uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003110{
Peter Maydell50013112015-04-26 16:49:24 +01003111 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003112}
3113
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003114uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003115{
Peter Maydell50013112015-04-26 16:49:24 +01003116 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003117}
3118
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003119uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003120{
Peter Maydell50013112015-04-26 16:49:24 +01003121 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003122}
3123
bellard8df1cd02005-01-28 22:37:22 +00003124/* warning: addr must be aligned.  The RAM page is not marked as dirty
 3125   and the code inside is not invalidated.  This is useful if the dirty
 3126   bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003127void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3128 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003129{
bellard8df1cd02005-01-28 22:37:22 +00003130 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003131 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003132 hwaddr l = 4;
3133 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003134 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003135 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003136 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003137
Paolo Bonzini41063e12015-03-18 14:21:43 +01003138 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003139 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003140 true);
3141 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003142 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003143
Peter Maydell50013112015-04-26 16:49:24 +01003144 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003145 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003146 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003147 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003148 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003149
Paolo Bonzini845b6212015-03-23 11:45:53 +01003150 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3151 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003152 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003153 r = MEMTX_OK;
3154 }
3155 if (result) {
3156 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003157 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003158 if (release_lock) {
3159 qemu_mutex_unlock_iothread();
3160 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003161 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003162}
3163
Peter Maydell50013112015-04-26 16:49:24 +01003164void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3165{
3166 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3167}
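
/*
 * Example (sketch): target code emulating hardware-managed page-table
 * updates can set accessed/dirty bits without marking the page modified;
 * cs (a CPUState), pte_addr, pte and the PG_ACCESSED_MASK flag follow the
 * x86 usage and are hypothetical here.
 *
 *     stl_phys_notdirty(cs->as, pte_addr, pte | PG_ACCESSED_MASK);
 */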
3168
bellard8df1cd02005-01-28 22:37:22 +00003169/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003170static inline void address_space_stl_internal(AddressSpace *as,
3171 hwaddr addr, uint32_t val,
3172 MemTxAttrs attrs,
3173 MemTxResult *result,
3174 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003175{
bellard8df1cd02005-01-28 22:37:22 +00003176 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003177 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003178 hwaddr l = 4;
3179 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003180 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003181 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003182
Paolo Bonzini41063e12015-03-18 14:21:43 +01003183 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003184 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003185 true);
3186 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003187 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003188
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003189#if defined(TARGET_WORDS_BIGENDIAN)
3190 if (endian == DEVICE_LITTLE_ENDIAN) {
3191 val = bswap32(val);
3192 }
3193#else
3194 if (endian == DEVICE_BIG_ENDIAN) {
3195 val = bswap32(val);
3196 }
3197#endif
Peter Maydell50013112015-04-26 16:49:24 +01003198 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003199 } else {
bellard8df1cd02005-01-28 22:37:22 +00003200 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003201 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003202 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003203 switch (endian) {
3204 case DEVICE_LITTLE_ENDIAN:
3205 stl_le_p(ptr, val);
3206 break;
3207 case DEVICE_BIG_ENDIAN:
3208 stl_be_p(ptr, val);
3209 break;
3210 default:
3211 stl_p(ptr, val);
3212 break;
3213 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003214 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003215 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003216 }
Peter Maydell50013112015-04-26 16:49:24 +01003217 if (result) {
3218 *result = r;
3219 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003220 if (release_lock) {
3221 qemu_mutex_unlock_iothread();
3222 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003223 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003224}
3225
3226void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3227 MemTxAttrs attrs, MemTxResult *result)
3228{
3229 address_space_stl_internal(as, addr, val, attrs, result,
3230 DEVICE_NATIVE_ENDIAN);
3231}
3232
3233void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3234 MemTxAttrs attrs, MemTxResult *result)
3235{
3236 address_space_stl_internal(as, addr, val, attrs, result,
3237 DEVICE_LITTLE_ENDIAN);
3238}
3239
3240void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3241 MemTxAttrs attrs, MemTxResult *result)
3242{
3243 address_space_stl_internal(as, addr, val, attrs, result,
3244 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003245}
3246
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003247void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003248{
Peter Maydell50013112015-04-26 16:49:24 +01003249 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003250}
3251
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003252void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003253{
Peter Maydell50013112015-04-26 16:49:24 +01003254 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003255}
3256
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003257void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003258{
Peter Maydell50013112015-04-26 16:49:24 +01003259 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003260}
3261
bellardaab33092005-10-30 20:48:42 +00003262/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003263void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3264 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003265{
3266 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003267 MemTxResult r;
3268
3269 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3270 if (result) {
3271 *result = r;
3272 }
3273}
3274
3275void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3276{
3277 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003278}
3279
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003280/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003281static inline void address_space_stw_internal(AddressSpace *as,
3282 hwaddr addr, uint32_t val,
3283 MemTxAttrs attrs,
3284 MemTxResult *result,
3285 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003286{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003287 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003288 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003289 hwaddr l = 2;
3290 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003291 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003292 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003293
Paolo Bonzini41063e12015-03-18 14:21:43 +01003294 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003295 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003296 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003297 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003298
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003299#if defined(TARGET_WORDS_BIGENDIAN)
3300 if (endian == DEVICE_LITTLE_ENDIAN) {
3301 val = bswap16(val);
3302 }
3303#else
3304 if (endian == DEVICE_BIG_ENDIAN) {
3305 val = bswap16(val);
3306 }
3307#endif
Peter Maydell50013112015-04-26 16:49:24 +01003308 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003309 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003310 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003311 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003312 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003313 switch (endian) {
3314 case DEVICE_LITTLE_ENDIAN:
3315 stw_le_p(ptr, val);
3316 break;
3317 case DEVICE_BIG_ENDIAN:
3318 stw_be_p(ptr, val);
3319 break;
3320 default:
3321 stw_p(ptr, val);
3322 break;
3323 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003324 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003325 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003326 }
Peter Maydell50013112015-04-26 16:49:24 +01003327 if (result) {
3328 *result = r;
3329 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003330 if (release_lock) {
3331 qemu_mutex_unlock_iothread();
3332 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003333 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003334}
3335
3336void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3337 MemTxAttrs attrs, MemTxResult *result)
3338{
3339 address_space_stw_internal(as, addr, val, attrs, result,
3340 DEVICE_NATIVE_ENDIAN);
3341}
3342
3343void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3344 MemTxAttrs attrs, MemTxResult *result)
3345{
3346 address_space_stw_internal(as, addr, val, attrs, result,
3347 DEVICE_LITTLE_ENDIAN);
3348}
3349
3350void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3351 MemTxAttrs attrs, MemTxResult *result)
3352{
3353 address_space_stw_internal(as, addr, val, attrs, result,
3354 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003355}
3356
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003357void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003358{
Peter Maydell50013112015-04-26 16:49:24 +01003359 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003360}
3361
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003362void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003363{
Peter Maydell50013112015-04-26 16:49:24 +01003364 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003365}
3366
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003367void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003368{
Peter Maydell50013112015-04-26 16:49:24 +01003369 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003370}
3371
bellardaab33092005-10-30 20:48:42 +00003372/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003373void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3374 MemTxAttrs attrs, MemTxResult *result)
3375{
3376 MemTxResult r;
3377 val = tswap64(val);
3378 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3379 if (result) {
3380 *result = r;
3381 }
3382}
3383
3384void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3385 MemTxAttrs attrs, MemTxResult *result)
3386{
3387 MemTxResult r;
3388 val = cpu_to_le64(val);
3389 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3390 if (result) {
3391 *result = r;
3392 }
 3393}

 3394void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3395 MemTxAttrs attrs, MemTxResult *result)
3396{
3397 MemTxResult r;
3398 val = cpu_to_be64(val);
3399 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3400 if (result) {
3401 *result = r;
3402 }
3403}
3404
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003405void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003406{
Peter Maydell50013112015-04-26 16:49:24 +01003407 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003408}
3409
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003410void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003411{
Peter Maydell50013112015-04-26 16:49:24 +01003412 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003413}
3414
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003415void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003416{
Peter Maydell50013112015-04-26 16:49:24 +01003417 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003418}
3419
aliguori5e2972f2009-03-28 17:51:36 +00003420/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003421int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003422 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003423{
3424 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003425 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003426 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003427
3428 while (len > 0) {
3429 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003430 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003431 /* if no physical page mapped, return an error */
3432 if (phys_addr == -1)
3433 return -1;
3434 l = (page + TARGET_PAGE_SIZE) - addr;
3435 if (l > len)
3436 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003437 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003438 if (is_write) {
3439 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3440 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003441 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3442 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003443 }
bellard13eb76e2004-01-24 15:23:36 +00003444 len -= l;
3445 buf += l;
3446 addr += l;
3447 }
3448 return 0;
3449}
Paul Brooka68fe892010-03-01 00:08:59 +00003450#endif
bellard13eb76e2004-01-24 15:23:36 +00003451
Blue Swirl8e4a4242013-01-06 18:30:17 +00003452/*
3453 * A helper function for the _utterly broken_ virtio device model to find out if
 3454 * it's running on a big-endian machine.  Don't do this at home, kids!
3455 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003456bool target_words_bigendian(void);
3457bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003458{
3459#if defined(TARGET_WORDS_BIGENDIAN)
3460 return true;
3461#else
3462 return false;
3463#endif
3464}
3465
Wen Congyang76f35532012-05-07 12:04:18 +08003466#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003467bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003468{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003469    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003470 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003471 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003472
Paolo Bonzini41063e12015-03-18 14:21:43 +01003473 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003474 mr = address_space_translate(&address_space_memory,
3475 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003476
Paolo Bonzini41063e12015-03-18 14:21:43 +01003477 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3478 rcu_read_unlock();
3479 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003480}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003481
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003482int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003483{
3484 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003485 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003486
Mike Day0dc3f442013-09-05 14:41:35 -04003487 rcu_read_lock();
3488 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003489 ret = func(block->idstr, block->host, block->offset,
3490 block->used_length, opaque);
3491 if (ret) {
3492 break;
3493 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003494 }
Mike Day0dc3f442013-09-05 14:41:35 -04003495 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003496 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003497}
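
/*
 * Example (sketch): a RAMBlockIterFunc that sums guest RAM; the callback
 * name and accumulator are hypothetical.
 *
 *     static int sum_ram(const char *idstr, void *host, ram_addr_t offset,
 *                        ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(sum_ram, &total);
 */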
Peter Maydellec3f8c92013-06-27 20:53:38 +01003498#endif