/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

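/* A worked sizing example (assuming a target with 4 KiB pages, i.e.
 * TARGET_PAGE_BITS == 12; other targets differ):
 *
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = (51 / 9) + 1 = 6
 *
 * Six levels of 9 bits each cover 54 bits, enough for the 52 bits of
 * page number that remain after stripping the 12 offset bits from a
 * 64-bit address.  Each Node is 512 four-byte PhysPageEntry slots,
 * i.e. 2 KiB per node.
 */
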
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

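/* What compaction buys, sketched with the 4 KiB-page numbers above
 * (P_L2_LEVELS == 6): if only a single page is mapped, each node on
 * its path has exactly one valid child with skip == 1, so
 * phys_page_compact() folds the chain into the root entry with
 * skip == 6 (still below the (1 << 3) limit checked above), and
 * phys_page_find() then reaches the bottom node in one step instead
 * of six.
 */
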
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

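/* Lookup sketch, under the same 4 KiB-page assumptions: for
 * addr == 0x12345678, index == 0x12345.  Starting at i == P_L2_LEVELS,
 * each iteration first subtracts lp.skip from i and then consumes nine
 * index bits at shift i * P_L2_BITS, so compacted entries jump several
 * levels at once.  The final range_covers_byte() check is needed
 * because compaction discards intermediate NIL entries: a leaf reached
 * through a compacted path only answers for the addresses its section
 * actually covers.
 */
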
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

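/* Typical caller pattern for address_space_translate(), as a sketch
 * (variable names follow callers such as address_space_rw; "len" is
 * whatever the caller wants to access):
 *
 *     hwaddr addr1, l = len;
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &addr1, &l, is_write);
 *     ... access at most "l" bytes of "mr" starting at offset "addr1" ...
 *     rcu_read_unlock();
 *
 * The loop above walks through any number of stacked IOMMUs; *plen can
 * only shrink, so long accesses must be chunked by the caller.
 */
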
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

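/* qemu_get_cpu() is a linear scan of the global CPU list; e.g.
 * qemu_get_cpu(0) returns the first vCPU, or NULL if "index" does not
 * name an existing CPU.  Code that already holds a CPUState should use
 * it directly rather than re-looking it up by cpu_index.
 */
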
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

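/* Worked example of the wraparound guard, on a hypothetical 32-bit
 * vaddr: a watchpoint at wp->vaddr == 0xfffffffc with wp->len == 4 has
 * wpend == 0xffffffff.  Computing wp->vaddr + wp->len instead would
 * wrap to 0 and compare as smaller than every access address.  Using
 * inclusive range ends, two ranges overlap iff neither one starts
 * after the other one ends.
 */
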
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

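/* Sketch of how a dirty-tracking client consumes this (the client
 * constant is real, the surrounding use is illustrative):
 *
 *     if (cpu_physical_memory_test_and_clear_dirty(start, length,
 *                                                  DIRTY_MEMORY_MIGRATION)) {
 *         ... re-send the pages in [start, start + length) ...
 *     }
 *
 * The bitmap update is atomic per long, and for TCG the TLB entries
 * covering the range are reset so the next guest write marks the page
 * dirty again.
 */
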
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
                  & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

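/* Example of the splitting done by mem_add(), assuming 4 KiB pages: a
 * section covering [0x1800, 0x4800) is registered in three pieces:
 *
 *     [0x1800, 0x2000)  unaligned head -> register_subpage()
 *     [0x2000, 0x4000)  whole pages    -> register_multipage()
 *     [0x4000, 0x4800)  unaligned tail -> register_subpage()
 *
 * A section that is already page-aligned and a multiple of the page
 * size in length goes through register_multipage() alone.
 */
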
Sheng Yang62a27442010-01-26 19:21:16 +08001066void qemu_flush_coalesced_mmio_buffer(void)
1067{
1068 if (kvm_enabled())
1069 kvm_flush_coalesced_mmio_buffer();
1070}
1071
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001072void qemu_mutex_lock_ramlist(void)
1073{
1074 qemu_mutex_lock(&ram_list.mutex);
1075}
1076
1077void qemu_mutex_unlock_ramlist(void)
1078{
1079 qemu_mutex_unlock(&ram_list.mutex);
1080}
1081
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001082#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001083
1084#include <sys/vfs.h>
1085
1086#define HUGETLBFS_MAGIC 0x958458f6
1087
Hu Taofc7a5802014-09-09 13:28:01 +08001088static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001089{
1090 struct statfs fs;
1091 int ret;
1092
1093 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001094 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001095 } while (ret != 0 && errno == EINTR);
1096
1097 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001098 error_setg_errno(errp, errno, "failed to get page size of file %s",
1099 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001100 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001101 }
1102
1103 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001104 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001105
1106 return fs.f_bsize;
1107}
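/* A hedged usage sketch for the probe above: callers hand in an Error
 * pointer and must treat a 0 return as failure.  example_probe() is
 * hypothetical and guarded out of compilation.
 */
#if 0
static void example_probe(const char *path)
{
    Error *err = NULL;
    long hpagesize = gethugepagesize(path, &err);

    if (err) {                           /* e.g. path does not exist */
        fprintf(stderr, "%s\n", error_get_pretty(err));
        error_free(err);
        return;
    }
    fprintf(stderr, "%s: page size %ld bytes\n", path, hpagesize);
}
#endif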
1108
Alex Williamson04b16652010-07-02 11:13:17 -06001109static void *file_ram_alloc(RAMBlock *block,
1110 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001111 const char *path,
1112 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001113{
1114 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001115 char *sanitized_name;
1116 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001117 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001118 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001119 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001120 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001121
Hu Taofc7a5802014-09-09 13:28:01 +08001122 hpagesize = gethugepagesize(path, &local_err);
1123 if (local_err) {
1124 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001125 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001126 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001127 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001128
1129 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001130 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1131 "or larger than huge page size 0x%" PRIx64,
1132 memory, hpagesize);
1133 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001134 }
1135
1136 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001137 error_setg(errp,
1138 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001139 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001140 }
1141
Peter Feiner8ca761f2013-03-04 13:54:25 -05001142 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001143 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001144 for (c = sanitized_name; *c != '\0'; c++) {
1145 if (*c == '/')
1146 *c = '_';
1147 }
1148
1149 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1150 sanitized_name);
1151 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001152
1153 fd = mkstemp(filename);
1154 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001155 error_setg_errno(errp, errno,
1156 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001157 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001158 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001159 }
1160 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001161 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001162
1163 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1164
1165 /*
1166 * ftruncate is not supported by hugetlbfs on older
1167 * hosts, so don't bother bailing out on errors.
1168 * If anything goes wrong with it under other filesystems,
1169 * mmap will fail.
1170 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001171 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001172 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001173 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001174
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001175 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1176 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1177 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001178 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001179 error_setg_errno(errp, errno,
1180 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001181 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001182 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001183 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001184
1185 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001186 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001187 }
1188
Alex Williamson04b16652010-07-02 11:13:17 -06001189 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001190 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001191
1192error:
1193 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001194 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001195 exit(1);
1196 }
1197 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001198}
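/* Two idioms above are worth isolating: the allocation size is rounded
 * up to a whole number of huge pages, and the backing file is unlink()ed
 * right after mkstemp() so it disappears once the mapping goes away.  A
 * minimal sketch of the round-up, assuming (as hugetlbfs guarantees) a
 * power-of-two page size; example_round_up() is hypothetical and the
 * block is guarded out of compilation.
 */
#if 0
static uint64_t example_round_up(uint64_t len, uint64_t pagesize)
{
    return (len + pagesize - 1) & ~(pagesize - 1);
}
/* e.g. example_round_up(5 << 20, 2 << 20) == 6 MiB on 2 MiB huge pages */
#endif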
1199#endif
1200
Mike Day0dc3f442013-09-05 14:41:35 -04001201/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001202static ram_addr_t find_ram_offset(ram_addr_t size)
1203{
Alex Williamson04b16652010-07-02 11:13:17 -06001204 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001205 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001206
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001207 assert(size != 0); /* it would hand out same offset multiple times */
1208
Mike Day0dc3f442013-09-05 14:41:35 -04001209 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001210 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001211 }
Alex Williamson04b16652010-07-02 11:13:17 -06001212
Mike Day0dc3f442013-09-05 14:41:35 -04001213 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001214 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001215
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001216 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001217
Mike Day0dc3f442013-09-05 14:41:35 -04001218 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001219 if (next_block->offset >= end) {
1220 next = MIN(next, next_block->offset);
1221 }
1222 }
1223 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001224 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001225 mingap = next - end;
1226 }
1227 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001228
1229 if (offset == RAM_ADDR_MAX) {
1230 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1231 (uint64_t)size);
1232 abort();
1233 }
1234
Alex Williamson04b16652010-07-02 11:13:17 -06001235 return offset;
1236}
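/* find_ram_offset() is a best-fit search: for every block end it finds
 * the closest following block start and keeps the smallest gap that
 * still fits.  The same double loop over a toy extent array, with
 * hypothetical example_* names and guarded out of compilation (the
 * empty-list case, handled before the loop above, is omitted):
 */
#if 0
struct example_extent { uint64_t off, len; };

static uint64_t example_find_gap(const struct example_extent *e, int n,
                                 uint64_t size)
{
    uint64_t best = UINT64_MAX, best_gap = UINT64_MAX;
    int i, j;

    for (i = 0; i < n; i++) {
        uint64_t end = e[i].off + e[i].len;
        uint64_t next = UINT64_MAX;

        for (j = 0; j < n; j++) {        /* closest start at or above end */
            if (e[j].off >= end && e[j].off < next) {
                next = e[j].off;
            }
        }
        if (next - end >= size && next - end < best_gap) {
            best = end;
            best_gap = next - end;
        }
    }
    return best;                         /* UINT64_MAX if nothing fits */
}
#endif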
1237
Juan Quintela652d7ec2012-07-20 10:37:54 +02001238ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001239{
Alex Williamsond17b5282010-06-25 11:08:38 -06001240 RAMBlock *block;
1241 ram_addr_t last = 0;
1242
Mike Day0dc3f442013-09-05 14:41:35 -04001243 rcu_read_lock();
1244 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001245 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001246 }
Mike Day0dc3f442013-09-05 14:41:35 -04001247 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001248 return last;
1249}
1250
Jason Baronddb97f12012-08-02 15:44:16 -04001251static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1252{
1253 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001254
1255 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001256 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001257 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1258 if (ret) {
1259 perror("qemu_madvise");
1260 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1261 "but dump_guest_core=off specified\n");
1262 }
1263 }
1264}
1265
Mike Day0dc3f442013-09-05 14:41:35 -04001266/* Called within an RCU critical section, or while the ramlist lock
1267 * is held.
1268 */
Hu Tao20cfe882014-04-02 15:13:26 +08001269static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001270{
Hu Tao20cfe882014-04-02 15:13:26 +08001271 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001272
Mike Day0dc3f442013-09-05 14:41:35 -04001273 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001274 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001275 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001276 }
1277 }
Hu Tao20cfe882014-04-02 15:13:26 +08001278
1279 return NULL;
1280}
1281
Mike Dayae3a7042013-09-05 14:41:35 -04001282/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001283void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1284{
Mike Dayae3a7042013-09-05 14:41:35 -04001285 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001286
Mike Day0dc3f442013-09-05 14:41:35 -04001287 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001288 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001289 assert(new_block);
1290 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001291
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001292 if (dev) {
1293 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001294 if (id) {
1295 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001296 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001297 }
1298 }
1299 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1300
Mike Day0dc3f442013-09-05 14:41:35 -04001301 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001302 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001303 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1304 new_block->idstr);
1305 abort();
1306 }
1307 }
Mike Day0dc3f442013-09-05 14:41:35 -04001308 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001309}
1310
Mike Dayae3a7042013-09-05 14:41:35 -04001311/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001312void qemu_ram_unset_idstr(ram_addr_t addr)
1313{
Mike Dayae3a7042013-09-05 14:41:35 -04001314 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001315
Mike Dayae3a7042013-09-05 14:41:35 -04001316 /* FIXME: arch_init.c assumes that this is not called during
1317 * migration. Ignore the problem since hot-unplug during migration
1318 * does not work anyway.
1319 */
1320
Mike Day0dc3f442013-09-05 14:41:35 -04001321 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001322 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001323 if (block) {
1324 memset(block->idstr, 0, sizeof(block->idstr));
1325 }
Mike Day0dc3f442013-09-05 14:41:35 -04001326 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001327}
1328
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001329static int memory_try_enable_merging(void *addr, size_t len)
1330{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001331 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001332 /* disabled by the user */
1333 return 0;
1334 }
1335
1336 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1337}
1338
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001339/* Only legal before the guest might have detected the memory size: e.g. on
1340 * incoming migration, or right after reset.
1341 *
1342 * As the memory core doesn't know how memory is accessed, it is up to the
1343 * resize callback to update device state and/or add assertions to detect
1344 * misuse, if necessary.
1345 */
1346int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1347{
1348 RAMBlock *block = find_ram_block(base);
1349
1350 assert(block);
1351
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001352 newsize = TARGET_PAGE_ALIGN(newsize);
1353
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001354 if (block->used_length == newsize) {
1355 return 0;
1356 }
1357
1358 if (!(block->flags & RAM_RESIZEABLE)) {
1359 error_setg_errno(errp, EINVAL,
1360 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1361 " in != 0x" RAM_ADDR_FMT, block->idstr,
1362 newsize, block->used_length);
1363 return -EINVAL;
1364 }
1365
1366 if (block->max_length < newsize) {
1367 error_setg_errno(errp, EINVAL,
1368 "Length too large: %s: 0x" RAM_ADDR_FMT
1369 " > 0x" RAM_ADDR_FMT, block->idstr,
1370 newsize, block->max_length);
1371 return -EINVAL;
1372 }
1373
1374 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1375 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001376 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1377 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001378 memory_region_set_size(block->mr, newsize);
1379 if (block->resized) {
1380 block->resized(block->idstr, newsize, block->host);
1381 }
1382 return 0;
1383}
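/* A hedged caller-side sketch for qemu_ram_resize(): the return value
 * signals failure and errp carries the detail, mirroring the reporting
 * style used elsewhere in this file.  example_grow() is hypothetical
 * and guarded out of compilation.
 */
#if 0
static void example_grow(ram_addr_t base, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(base, newsize, &err) < 0) {
        error_report("%s", error_get_pretty(err));
        error_free(err);
    }
}
#endif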
1384
Hu Taoef701d72014-09-09 13:27:54 +08001385static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001386{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001387 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001388 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001389 ram_addr_t old_ram_size, new_ram_size;
1390
1391 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001392
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001393 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001394 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001395
1396 if (!new_block->host) {
1397 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001398 xen_ram_alloc(new_block->offset, new_block->max_length,
1399 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001400 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001401 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001402 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001403 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001404 error_setg_errno(errp, errno,
1405 "cannot set up guest memory '%s'",
1406 memory_region_name(new_block->mr));
1407 qemu_mutex_unlock_ramlist();
1408 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001409 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001410 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001411 }
1412 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001413
Li Zhijiandd631692015-07-02 20:18:06 +08001414 new_ram_size = MAX(old_ram_size,
1415 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1416 if (new_ram_size > old_ram_size) {
1417 migration_bitmap_extend(old_ram_size, new_ram_size);
1418 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001419 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1420 * QLIST (which has an RCU-friendly variant) does not have insertion at
1421 * tail, so save the last element in last_block.
1422 */
Mike Day0dc3f442013-09-05 14:41:35 -04001423 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001424 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001425 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001426 break;
1427 }
1428 }
1429 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001430 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001431 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001432 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001433 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001434 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001435 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001436 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001437
Mike Day0dc3f442013-09-05 14:41:35 -04001438 /* Write list before version */
1439 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001440 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001441 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001442
Juan Quintela2152f5c2013-10-08 13:52:02 +02001443 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1444
1445 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001446 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001447
1448 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001449 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1450 ram_list.dirty_memory[i] =
1451 bitmap_zero_extend(ram_list.dirty_memory[i],
1452 old_ram_size, new_ram_size);
1453 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001454 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001455 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001456 new_block->used_length,
1457 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001458
Paolo Bonzinia904c912015-01-21 16:18:35 +01001459 if (new_block->host) {
1460 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1461 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1462 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1463 if (kvm_enabled()) {
1464 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1465 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001466 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001467
1468 return new_block->offset;
1469}
1470
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001471#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001472ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001473 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001474 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001475{
1476 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001477 ram_addr_t addr;
1478 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001479
1480 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001481 error_setg(errp, "-mem-path not supported with Xen");
1482 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001483 }
1484
1485 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1486 /*
1487 * file_ram_alloc() needs to allocate just like
1488 * phys_mem_alloc, but we haven't bothered to provide
1489 * a hook there.
1490 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001491 error_setg(errp,
1492 "-mem-path not supported with this accelerator");
1493 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001494 }
1495
1496 size = TARGET_PAGE_ALIGN(size);
1497 new_block = g_malloc0(sizeof(*new_block));
1498 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001499 new_block->used_length = size;
1500 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001501 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001502 new_block->host = file_ram_alloc(new_block, size,
1503 mem_path, errp);
1504 if (!new_block->host) {
1505 g_free(new_block);
1506 return -1;
1507 }
1508
Hu Taoef701d72014-09-09 13:27:54 +08001509 addr = ram_block_add(new_block, &local_err);
1510 if (local_err) {
1511 g_free(new_block);
1512 error_propagate(errp, local_err);
1513 return -1;
1514 }
1515 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001516}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001517#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001518
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001519static
1520ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1521 void (*resized)(const char*,
1522 uint64_t length,
1523 void *host),
1524 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001525 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001526{
1527 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001528 ram_addr_t addr;
1529 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001530
1531 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001532 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001533 new_block = g_malloc0(sizeof(*new_block));
1534 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001535 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001536 new_block->used_length = size;
1537 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001538 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001539 new_block->fd = -1;
1540 new_block->host = host;
1541 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001542 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001543 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001544 if (resizeable) {
1545 new_block->flags |= RAM_RESIZEABLE;
1546 }
Hu Taoef701d72014-09-09 13:27:54 +08001547 addr = ram_block_add(new_block, &local_err);
1548 if (local_err) {
1549 g_free(new_block);
1550 error_propagate(errp, local_err);
1551 return -1;
1552 }
1553 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001554}
1555
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001556ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1557 MemoryRegion *mr, Error **errp)
1558{
1559 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1560}
1561
Hu Taoef701d72014-09-09 13:27:54 +08001562ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001563{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001564 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1565}
1566
1567ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1568 void (*resized)(const char*,
1569 uint64_t length,
1570 void *host),
1571 MemoryRegion *mr, Error **errp)
1572{
1573 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001574}
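/* A usage sketch for the resizeable variant: the device registers a
 * callback that qemu_ram_resize() invokes after used_length has been
 * updated.  The sizes and example_* names are made up; guarded out of
 * compilation.
 */
#if 0
static void example_resized(const char *id, uint64_t length, void *host)
{
    /* bring device state up to date with the new length */
    fprintf(stderr, "block %s resized to %" PRIu64 " bytes at %p\n",
            id, length, host);
}

static ram_addr_t example_alloc(MemoryRegion *mr, Error **errp)
{
    /* start at 128 MiB, allow growth up to 1 GiB */
    return qemu_ram_alloc_resizeable(128 * 1024 * 1024, 1024 * 1024 * 1024,
                                     example_resized, mr, errp);
}
#endif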
bellarde9a1ab12007-02-08 23:08:38 +00001575
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001576void qemu_ram_free_from_ptr(ram_addr_t addr)
1577{
1578 RAMBlock *block;
1579
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001580 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001581 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001582 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001583 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001584 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001585 /* Write list before version */
1586 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001587 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001588 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001589 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001590 }
1591 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001592 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001593}
1594
Paolo Bonzini43771532013-09-09 17:58:40 +02001595static void reclaim_ramblock(RAMBlock *block)
1596{
1597 if (block->flags & RAM_PREALLOC) {
1598 ;
1599 } else if (xen_enabled()) {
1600 xen_invalidate_map_cache_entry(block->host);
1601#ifndef _WIN32
1602 } else if (block->fd >= 0) {
1603 munmap(block->host, block->max_length);
1604 close(block->fd);
1605#endif
1606 } else {
1607 qemu_anon_ram_free(block->host, block->max_length);
1608 }
1609 g_free(block);
1610}
1611
Anthony Liguoric227f092009-10-01 16:12:16 -05001612void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001613{
Alex Williamson04b16652010-07-02 11:13:17 -06001614 RAMBlock *block;
1615
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001616 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001617 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001618 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001619 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001620 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001621 /* Write list before version */
1622 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001623 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001624 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001625 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001626 }
1627 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001628 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001629}
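/* qemu_ram_free() unlinks the block under the mutex but defers the
 * actual free with call_rcu(), so readers inside an RCU critical section
 * never see freed memory.  The same pattern on a toy struct; example_*
 * names are hypothetical and the block is guarded out of compilation.
 * Note the rcu_head must sit at offset zero, as call_rcu() requires.
 */
#if 0
struct example_node {
    struct rcu_head rcu;                 /* must be the first field */
    int payload;
};

static void example_reclaim(struct example_node *n)
{
    g_free(n);
}

static void example_unpublish(struct example_node *n)
{
    /* unlink n from its RCU-protected list first, then defer the free
     * until every reader that might still see n has finished */
    call_rcu(n, example_reclaim, rcu);
}
#endif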
1630
Huang Yingcd19cfa2011-03-02 08:56:19 +01001631#ifndef _WIN32
1632void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1633{
1634 RAMBlock *block;
1635 ram_addr_t offset;
1636 int flags;
1637 void *area, *vaddr;
1638
Mike Day0dc3f442013-09-05 14:41:35 -04001639 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001640 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001641 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001642 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001643 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001644 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001645 } else if (xen_enabled()) {
1646 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001647 } else {
1648 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001649 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001650 flags |= (block->flags & RAM_SHARED ?
1651 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001652 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1653 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001654 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001655 /*
1656 * Remap needs to match alloc. Accelerators that
1657 * set phys_mem_alloc never remap. If they did,
1658 * we'd need a remap hook here.
1659 */
1660 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1661
Huang Yingcd19cfa2011-03-02 08:56:19 +01001662 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1663 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1664 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001665 }
1666 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001667 fprintf(stderr, "Could not remap addr: "
1668 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001669 length, addr);
1670 exit(1);
1671 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001672 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001673 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001674 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001675 }
1676 }
1677}
1678#endif /* !_WIN32 */
1679
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001680int qemu_get_ram_fd(ram_addr_t addr)
1681{
Mike Dayae3a7042013-09-05 14:41:35 -04001682 RAMBlock *block;
1683 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001684
Mike Day0dc3f442013-09-05 14:41:35 -04001685 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001686 block = qemu_get_ram_block(addr);
1687 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001688 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001689 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001690}
1691
Damjan Marion3fd74b82014-06-26 23:01:32 +02001692void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1693{
Mike Dayae3a7042013-09-05 14:41:35 -04001694 RAMBlock *block;
1695 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001696
Mike Day0dc3f442013-09-05 14:41:35 -04001697 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001698 block = qemu_get_ram_block(addr);
1699 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001700 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001701 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001702}
1703
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001704/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001705 * This should not be used for general purpose DMA. Use address_space_map
1706 * or address_space_rw instead. For local memory (e.g. video ram) that the
1707 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001708 *
1709 * By the time this function returns, the returned pointer is not protected
1710 * by RCU anymore. If the caller is not within an RCU critical section and
1711 * does not hold the iothread lock, it must have other means of protecting the
1712 * pointer, such as a reference to the region that includes the incoming
1713 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001714 */
1715void *qemu_get_ram_ptr(ram_addr_t addr)
1716{
Mike Dayae3a7042013-09-05 14:41:35 -04001717 RAMBlock *block;
1718 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001719
Mike Day0dc3f442013-09-05 14:41:35 -04001720 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001721 block = qemu_get_ram_block(addr);
1722
1723 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001724 /* We need to check whether the requested address is in the RAM
1725 * because we don't want to map the entire guest memory in QEMU.
1726 * In that case just map up to the end of the page.
1727 */
1728 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001729 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001730 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001731 }
Mike Dayae3a7042013-09-05 14:41:35 -04001732
1733 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001734 }
Mike Dayae3a7042013-09-05 14:41:35 -04001735 ptr = ramblock_ptr(block, addr - block->offset);
1736
Mike Day0dc3f442013-09-05 14:41:35 -04001737unlock:
1738 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001739 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001740}
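/* A sketch of RCU-safe use of qemu_get_ram_ptr(): the caller extends
 * protection across the access itself, since the pointer is unprotected
 * once the function returns.  example_peek_long() is hypothetical and
 * guarded out of compilation.
 */
#if 0
static uint32_t example_peek_long(ram_addr_t addr)
{
    uint32_t val;

    rcu_read_lock();                     /* keep the block alive while used */
    val = ldl_p(qemu_get_ram_ptr(addr));
    rcu_read_unlock();
    return val;
}
#endif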
1741
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001742/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001743 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001744 *
1745 * By the time this function returns, the returned pointer is not protected
1746 * by RCU anymore. If the caller is not within an RCU critical section and
1747 * does not hold the iothread lock, it must have other means of protecting the
1748 * pointer, such as a reference to the region that includes the incoming
1749 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001750 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001751static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001752{
Mike Dayae3a7042013-09-05 14:41:35 -04001753 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001754 if (*size == 0) {
1755 return NULL;
1756 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001757 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001758 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001759 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001760 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001761 rcu_read_lock();
1762 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001763 if (addr - block->offset < block->max_length) {
1764 if (addr - block->offset + *size > block->max_length)
1765 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001766 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001767 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001768 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001769 }
1770 }
1771
1772 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1773 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001774 }
1775}
1776
Paolo Bonzini7443b432013-06-03 12:44:02 +02001777/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001778 * (typically a TLB entry) back to a ram offset.
1779 *
1780 * By the time this function returns, the returned pointer is not protected
1781 * by RCU anymore. If the caller is not within an RCU critical section and
1782 * does not hold the iothread lock, it must have other means of protecting the
1783 * pointer, such as a reference to the region that includes the incoming
1784 * ram_addr_t.
1785 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001786MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001787{
pbrook94a6b542009-04-11 17:15:54 +00001788 RAMBlock *block;
1789 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001790 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001791
Jan Kiszka868bb332011-06-21 22:59:09 +02001792 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001793 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001794 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001795 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001796 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001797 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001798 }
1799
Mike Day0dc3f442013-09-05 14:41:35 -04001800 rcu_read_lock();
1801 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001802 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001803 goto found;
1804 }
1805
Mike Day0dc3f442013-09-05 14:41:35 -04001806 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001807 /* This case happens when the block is not mapped. */
1808 if (block->host == NULL) {
1809 continue;
1810 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001811 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001812 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001813 }
pbrook94a6b542009-04-11 17:15:54 +00001814 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001815
Mike Day0dc3f442013-09-05 14:41:35 -04001816 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001817 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001818
1819found:
1820 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001821 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001822 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001823 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001824}
Alex Williamsonf471a172010-06-11 11:11:42 -06001825
Avi Kivitya8170e52012-10-23 12:30:10 +02001826static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001827 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001828{
Juan Quintela52159192013-10-08 12:44:04 +02001829 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001830 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001831 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001832 switch (size) {
1833 case 1:
1834 stb_p(qemu_get_ram_ptr(ram_addr), val);
1835 break;
1836 case 2:
1837 stw_p(qemu_get_ram_ptr(ram_addr), val);
1838 break;
1839 case 4:
1840 stl_p(qemu_get_ram_ptr(ram_addr), val);
1841 break;
1842 default:
1843 abort();
1844 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001845 /* Set both VGA and migration bits for simplicity and to remove
1846 * the notdirty callback faster.
1847 */
1848 cpu_physical_memory_set_dirty_range(ram_addr, size,
1849 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001850 /* we remove the notdirty callback only if the code has been
1851 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001852 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001853 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001854 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001855 }
bellard1ccde1c2004-02-06 19:46:14 +00001856}
1857
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001858static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1859 unsigned size, bool is_write)
1860{
1861 return is_write;
1862}
1863
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001864static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001865 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001866 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001867 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001868};
1869
pbrook0f459d12008-06-09 00:20:13 +00001870/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001871static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001872{
Andreas Färber93afead2013-08-26 03:41:01 +02001873 CPUState *cpu = current_cpu;
1874 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001875 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001876 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001877 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001878 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001879
Andreas Färberff4700b2013-08-26 18:23:18 +02001880 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001881 /* We re-entered the check after replacing the TB. Now raise
1882 * the debug interrupt so that it will trigger after the
1883 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001884 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001885 return;
1886 }
Andreas Färber93afead2013-08-26 03:41:01 +02001887 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001888 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001889 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1890 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001891 if (flags == BP_MEM_READ) {
1892 wp->flags |= BP_WATCHPOINT_HIT_READ;
1893 } else {
1894 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1895 }
1896 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001897 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001898 if (!cpu->watchpoint_hit) {
1899 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001900 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001901 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001902 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001903 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001904 } else {
1905 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001906 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001907 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001908 }
aliguori06d55cc2008-11-18 20:24:06 +00001909 }
aliguori6e140f22008-11-18 20:37:55 +00001910 } else {
1911 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001912 }
1913 }
1914}
1915
pbrook6658ffb2007-03-16 23:58:11 +00001916/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1917 so these check for a hit then pass through to the normal out-of-line
1918 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001919static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1920 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001921{
Peter Maydell66b9b432015-04-26 16:49:24 +01001922 MemTxResult res;
1923 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001924
Peter Maydell66b9b432015-04-26 16:49:24 +01001925 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001926 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001927 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01001928 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001929 break;
1930 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01001931 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001932 break;
1933 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01001934 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001935 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001936 default: abort();
1937 }
Peter Maydell66b9b432015-04-26 16:49:24 +01001938 *pdata = data;
1939 return res;
1940}
1941
1942static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1943 uint64_t val, unsigned size,
1944 MemTxAttrs attrs)
1945{
1946 MemTxResult res;
1947
1948 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1949 switch (size) {
1950 case 1:
1951 address_space_stb(&address_space_memory, addr, val, attrs, &res);
1952 break;
1953 case 2:
1954 address_space_stw(&address_space_memory, addr, val, attrs, &res);
1955 break;
1956 case 4:
1957 address_space_stl(&address_space_memory, addr, val, attrs, &res);
1958 break;
1959 default: abort();
1960 }
1961 return res;
pbrook6658ffb2007-03-16 23:58:11 +00001962}
1963
Avi Kivity1ec9b902012-01-02 12:47:48 +02001964static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01001965 .read_with_attrs = watch_mem_read,
1966 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001967 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001968};
pbrook6658ffb2007-03-16 23:58:11 +00001969
Peter Maydellf25a49e2015-04-26 16:49:24 +01001970static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1971 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001972{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001973 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001974 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01001975 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001976
blueswir1db7b5422007-05-26 17:36:03 +00001977#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001978 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001979 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001980#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01001981 res = address_space_read(subpage->as, addr + subpage->base,
1982 attrs, buf, len);
1983 if (res) {
1984 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01001985 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001986 switch (len) {
1987 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001988 *data = ldub_p(buf);
1989 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001990 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001991 *data = lduw_p(buf);
1992 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001993 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001994 *data = ldl_p(buf);
1995 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001996 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001997 *data = ldq_p(buf);
1998 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001999 default:
2000 abort();
2001 }
blueswir1db7b5422007-05-26 17:36:03 +00002002}
2003
Peter Maydellf25a49e2015-04-26 16:49:24 +01002004static MemTxResult subpage_write(void *opaque, hwaddr addr,
2005 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002006{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002007 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002008 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002009
blueswir1db7b5422007-05-26 17:36:03 +00002010#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002011 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002012 " value %"PRIx64"\n",
2013 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002014#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002015 switch (len) {
2016 case 1:
2017 stb_p(buf, value);
2018 break;
2019 case 2:
2020 stw_p(buf, value);
2021 break;
2022 case 4:
2023 stl_p(buf, value);
2024 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002025 case 8:
2026 stq_p(buf, value);
2027 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002028 default:
2029 abort();
2030 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002031 return address_space_write(subpage->as, addr + subpage->base,
2032 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002033}
2034
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002035static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002036 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002037{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002038 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002039#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002040 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002041 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002042#endif
2043
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002044 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002045 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002046}
2047
Avi Kivity70c68e42012-01-02 12:32:48 +02002048static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002049 .read_with_attrs = subpage_read,
2050 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002051 .impl.min_access_size = 1,
2052 .impl.max_access_size = 8,
2053 .valid.min_access_size = 1,
2054 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002055 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002056 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002057};
2058
Anthony Liguoric227f092009-10-01 16:12:16 -05002059static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002060 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002061{
2062 int idx, eidx;
2063
2064 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2065 return -1;
2066 idx = SUBPAGE_IDX(start);
2067 eidx = SUBPAGE_IDX(end);
2068#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002069 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2070 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002071#endif
blueswir1db7b5422007-05-26 17:36:03 +00002072 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002073 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002074 }
2075
2076 return 0;
2077}
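/* Assuming SUBPAGE_IDX() masks an address down to its byte offset within
 * the page (as its use above suggests), a device occupying bytes
 * 0x100..0x1ff of a subpage would be registered like this; the values
 * are made up and the block is guarded out of compilation.
 */
#if 0
static void example_register(subpage_t *mmio, uint16_t section)
{
    subpage_register(mmio, 0x100, 0x1ff, section);
}
#endif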
2078
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002079static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002080{
Anthony Liguoric227f092009-10-01 16:12:16 -05002081 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002082
Anthony Liguori7267c092011-08-20 22:09:37 -05002083 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002084
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002085 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002086 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002087 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002088 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002089 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002090#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002091 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2092 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002093#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002094 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002095
2096 return mmio;
2097}
2098
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002099static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2100 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002101{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002102 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002103 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002104 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002105 .mr = mr,
2106 .offset_within_address_space = 0,
2107 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002108 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002109 };
2110
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002111 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002112}
2113
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002114MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002115{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002116 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2117 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002118
2119 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002120}
2121
Avi Kivitye9179ce2009-06-14 11:38:52 +03002122static void io_mem_init(void)
2123{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002124 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002125 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002126 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002127 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002128 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002129 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002130 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002131}
2132
Avi Kivityac1970f2012-10-03 16:22:53 +02002133static void mem_begin(MemoryListener *listener)
2134{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002135 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002136 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2137 uint16_t n;
2138
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002139 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002140 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002141 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002142 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002143 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002144 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002145 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002146 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002147
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002148 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002149 d->as = as;
2150 as->next_dispatch = d;
2151}
2152
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002153static void address_space_dispatch_free(AddressSpaceDispatch *d)
2154{
2155 phys_sections_free(&d->map);
2156 g_free(d);
2157}
2158
Paolo Bonzini00752702013-05-29 12:13:54 +02002159static void mem_commit(MemoryListener *listener)
2160{
2161 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002162 AddressSpaceDispatch *cur = as->dispatch;
2163 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002164
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002165 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002166
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002167 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002168 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002169 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002170 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002171}
2172
Avi Kivity1d711482012-10-02 18:54:45 +02002173static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002174{
Andreas Färber182735e2013-05-29 22:29:20 +02002175 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002176
2177 /* since each CPU stores ram addresses in its TLB cache, we must
2178 reset the modified entries */
2179 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002180 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002181 /* FIXME: Disentangle the cpu.h circular file deps so we can
2182 directly get the right CPU from listener. */
2183 if (cpu->tcg_as_listener != listener) {
2184 continue;
2185 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002186 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002187 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002188}
2189
Avi Kivityac1970f2012-10-03 16:22:53 +02002190void address_space_init_dispatch(AddressSpace *as)
2191{
Paolo Bonzini00752702013-05-29 12:13:54 +02002192 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002193 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002194 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002195 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002196 .region_add = mem_add,
2197 .region_nop = mem_add,
2198 .priority = 0,
2199 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002200 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002201}
2202
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002203void address_space_unregister(AddressSpace *as)
2204{
2205 memory_listener_unregister(&as->dispatch_listener);
2206}
2207
Avi Kivity83f3c252012-10-07 12:59:55 +02002208void address_space_destroy_dispatch(AddressSpace *as)
2209{
2210 AddressSpaceDispatch *d = as->dispatch;
2211
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002212 atomic_rcu_set(&as->dispatch, NULL);
2213 if (d) {
2214 call_rcu(d, address_space_dispatch_free, rcu);
2215 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002216}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
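
/* A worked example (values are illustrative only): for a request of
 * l = 8 bytes at addr = 0x1006 against a region whose
 * valid.max_access_size is 4 and which does not allow unaligned
 * accesses, the alignment bound is 0x1006 & -0x1006 = 2, so the access
 * is clipped to 2 bytes and the caller loops for the remainder.
 */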

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
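
/* Callers use the return value as in the pattern below (this is what
 * address_space_rw() and the ld/st helpers further down do); the flag,
 * not the helper, decides whether the iothread lock is dropped again:
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ... dispatch the MMIO access ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *         release_lock = false;
 *     }
 */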

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
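
/* Example of a typical caller, e.g. a device model doing DMA to and
 * from guest RAM through the system address space.  The guest address
 * and descriptor layout below are hypothetical:
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_rw(0x40000000, desc, sizeof(desc), 0);   (read)
 *     ... update the descriptor ...
 *     cpu_physical_memory_rw(0x40000000, desc, sizeof(desc), 1);   (write)
 */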

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* Used for ROM loading: can write to both RAM and ROM. */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
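
/* A minimal usage sketch (the load address and sizes are made up):
 * a firmware loader copies a blob into the guest with
 * cpu_physical_memory_write_rom() so that ROM regions are written too,
 * then flushes the host icache for the KVM/Xen case:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory,
 *                                   0xfffc0000, blob, blob_size);
 *     cpu_flush_icache_range(0xfffc0000, blob_size);
 */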

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
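
/* Sketch of the retry protocol (the names are illustrative): a caller
 * that sees address_space_map() return NULL registers a bottom half and
 * retries from there.  Scheduling the BH also unregisters the client,
 * so cpu_unregister_map_client() is only needed to cancel a pending
 * registration:
 *
 *     static void my_retry_bh(void *opaque)
 *     {
 *         ... call address_space_map() again ...
 *     }
 *
 *     my_bh = qemu_bh_new(my_retry_bh, NULL);
 *     cpu_register_map_client(my_bh);
 */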

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
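
/* Illustrative check-before-access pattern (address and length are
 * hypothetical); note that a positive answer is only advisory, since
 * the memory map may change before the actual access:
 *
 *     if (address_space_access_valid(&address_space_memory,
 *                                    0x10000, 4, false)) {
 *         val = ldl_phys(&address_space_memory, 0x10000);
 *     }
 */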

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
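
/* A minimal map/unmap sketch (guest address and length are made up).
 * *plen may come back smaller than requested, and unmap must report how
 * much was actually accessed so that the right range is dirtied:
 *
 *     hwaddr len = 4096;
 *     void *host = cpu_physical_memory_map(0x100000, &len, 1);
 *     if (host) {
 *         ... write up to len bytes at host ...
 *         cpu_physical_memory_unmap(host, len, 1, len);
 *     }
 */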

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
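
/* Example of a checked load through the attribute-aware API (the
 * address is illustrative); ldl_phys() is the same call with
 * MEMTXATTRS_UNSPECIFIED and no result reporting:
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl(&address_space_memory, 0x1000,
 *                                    MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ... the access failed on the bus ...
 *     }
 */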

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned.  The RAM page is not marked dirty and
   the code inside it is not invalidated.  This is useful when the dirty
   bits are used to track modified PTEs. */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
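
/* Typical use (the variable names are hypothetical): target code that
 * emulates a hardware page table walk updates accessed/dirty bits in a
 * guest PTE with stl_phys_notdirty(), so that the store itself neither
 * flips the page's dirty bits nor invalidates translated code:
 *
 *     stl_phys_notdirty(cs->as, pte_addr, pte | PG_ACCESSED_MASK);
 */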

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
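
/* Usage sketch (the addresses and variables are made up): device models
 * pick the variant matching their bus endianness, e.g. publishing a
 * descriptor address into a little-endian ring:
 *
 *     stq_le_phys(&address_space_memory, ring_base + 8 * idx, desc_pa);
 */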

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
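
/* Illustrative caller (the buffer size is arbitrary): the gdbstub reads
 * guest virtual memory this way, translating each page through
 * cpu_get_phys_page_debug() rather than the TLB:
 *
 *     uint8_t mem_buf[64];
 *     if (cpu_memory_rw_debug(cpu, vaddr, mem_buf, sizeof(mem_buf), 0) != 0) {
 *         ... the address was unmapped ...
 *     }
 */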

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
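
/* Minimal sketch of a RAMBlockIterFunc callback (the function below is
 * hypothetical); returning non-zero stops the walk early:
 *
 *     static int print_block(const char *idstr, void *host_addr,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         printf("%s: host %p len " RAM_ADDR_FMT "\n",
 *                idstr, host_addr, length);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(print_block, NULL);
 */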
#endif