/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

pbrook99773bd2006-04-16 15:14:59 +000062#if !defined(CONFIG_USER_ONLY)
Mike Day0dc3f442013-09-05 14:41:35 -040063/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
64 * are protected by the ramlist lock.
65 */
Mike Day0d53d9f2015-01-21 13:45:24 +010066RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
Avi Kivity62152b82011-07-26 14:26:14 +030067
68static MemoryRegion *system_memory;
Avi Kivity309cb472011-08-08 16:09:03 +030069static MemoryRegion *system_io;
Avi Kivity62152b82011-07-26 14:26:14 +030070
Avi Kivityf6790af2012-10-02 20:13:51 +020071AddressSpace address_space_io;
72AddressSpace address_space_memory;
Avi Kivity2673a5d2012-10-02 18:49:28 +020073
Paolo Bonzini0844e002013-05-24 14:37:28 +020074MemoryRegion io_mem_rom, io_mem_notdirty;
Jan Kiszkaacc9d802013-05-26 21:55:37 +020075static MemoryRegion io_mem_unassigned;
Avi Kivity0e0df1e2012-01-02 00:32:15 +020076
Paolo Bonzini7bd4f432014-05-14 17:43:22 +080077/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
78#define RAM_PREALLOC (1 << 0)
79
Paolo Bonzinidbcb8982014-06-10 19:15:24 +080080/* RAM is mmap-ed with MAP_SHARED */
81#define RAM_SHARED (1 << 1)
82
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +020083/* Only a portion of RAM (used_length) is actually used, and migrated.
84 * This used_length size can change across reboots.
85 */
86#define RAM_RESIZEABLE (1 << 2)
87
pbrooke2eef172008-06-08 01:09:01 +000088#endif
bellard9fa3e852004-01-04 18:06:42 +000089
Andreas Färberbdc44642013-06-24 23:50:24 +020090struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
bellard6a00d602005-11-21 23:25:50 +000091/* current CPU in the current thread. It is only valid inside
92 cpu_exec() */
Andreas Färber4917cf42013-05-27 05:17:50 +020093DEFINE_TLS(CPUState *, current_cpu);
pbrook2e70f6e2008-06-29 01:03:05 +000094/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +000095 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +000096 2 = Adaptive rate instruction counting. */
Paolo Bonzini5708fc62012-11-26 15:36:40 +010097int use_icount;
bellard6a00d602005-11-21 23:25:50 +000098
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf.
     */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

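/* Worked example: with ADDR_SPACE_BITS = 64, P_L2_BITS = 9 and the
 * common 4 KiB target page (TARGET_PAGE_BITS = 12), P_L2_LEVELS is
 * ((64 - 12 - 1) / 9) + 1 = 6: six radix-tree levels of 512 entries
 * each, covering 6 * 9 + 12 = 66 >= 64 address bits.  TARGET_PAGE_BITS
 * is target-dependent, so the level count is too.
 */
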
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
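
/* A subpage splits a single target page whose contents do not all map
 * to one MemoryRegion: each sub_section[] slot holds the section index
 * for the byte at that offset within the page (see SUBPAGE_IDX), so
 * regions smaller than a page can coexist inside it.
 */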

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
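
/* Illustration: when registering a run of pages, any naturally aligned,
 * step-sized chunk (step = 512^level pages) is recorded as a single
 * leaf entry at that level rather than being expanded all the way down
 * to individual page entries.
 */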

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
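
/* After compaction a chain of interior nodes that each had a single
 * child collapses into one entry whose skip field counts the levels
 * jumped; phys_page_find() below honours this via "i -= lp.skip" and
 * so visits fewer nodes on the lookup path.
 */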

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
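
/* Example lookup, assuming 4 KiB target pages: an uncompacted tree
 * consumes nine index bits per iteration, i.e. six node visits for a
 * 64-bit address, while a compacted node with skip == 3 is consulted
 * once but advances the walk by three levels in a single step.
 */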

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (i.e. the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
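
/* Example of the wrap-around the inclusive ends above avoid: with a
 * 64-bit vaddr, a watchpoint at 0xfffffffffffff000 of len 0x1000 makes
 * wp->vaddr + wp->len overflow to 0, but wpend stays at
 * 0xffffffffffffffff and the overlap test still orders correctly.
 */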

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

866/* Note: start and end must be within the same ram block. */
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000867bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
868 ram_addr_t length,
869 unsigned client)
Juan Quintelad24981d2012-05-22 00:42:40 +0200870{
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000871 unsigned long end, page;
872 bool dirty;
Juan Quintelad24981d2012-05-22 00:42:40 +0200873
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000874 if (length == 0) {
875 return false;
876 }
877
878 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
879 page = start >> TARGET_PAGE_BITS;
880 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
881 page, end - page);
882
883 if (dirty && tcg_enabled()) {
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200884 tlb_reset_dirty_range_all(start, length);
Juan Quintelad24981d2012-05-22 00:42:40 +0200885 }
Stefan Hajnoczi03eebc92014-12-02 11:23:18 +0000886
887 return dirty;
bellard1ccde1c2004-02-06 19:46:14 +0000888}
889
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
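
/* The iotlb value built above packs two fields: a page-aligned address
 * in the high bits and a phys_sections index in the low bits (the
 * PHYS_SECTION_* constants).  phys_section_add() asserts that section
 * numbers stay below TARGET_PAGE_SIZE precisely so that this OR-packing
 * cannot spill into the address bits.
 */
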
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
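
/* Worked example, assuming 4 KiB target pages: adding a section spanning
 * [0x800, 0x2800) registers the unaligned head [0x800, 0x1000) as a
 * subpage, the aligned middle [0x1000, 0x2000) via register_multipage(),
 * and the partial tail [0x2000, 0x2800) as another subpage.
 */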

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001083#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001084
1085#include <sys/vfs.h>
1086
1087#define HUGETLBFS_MAGIC 0x958458f6
1088
Hu Taofc7a5802014-09-09 13:28:01 +08001089static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001090{
1091 struct statfs fs;
1092 int ret;
1093
1094 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001095 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001096 } while (ret != 0 && errno == EINTR);
1097
1098 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001099 error_setg_errno(errp, errno, "failed to get page size of file %s",
1100 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001101 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001102 }
1103
1104 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001105 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001106
1107 return fs.f_bsize;
1108}
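
/*
 * Illustrative note (not part of the original file): on hugetlbfs,
 * f_bsize is the huge page size of the mount, so for a mount created
 * with, say,
 *
 *     mount -t hugetlbfs -o pagesize=2M none /dev/hugepages
 *
 * gethugepagesize("/dev/hugepages/foo", &err) returns 0x200000 (2 MiB).
 * On any other filesystem it warns and returns the ordinary block size
 * instead, so callers still get a usable (if not huge) alignment.
 */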
1109
Alex Williamson04b16652010-07-02 11:13:17 -06001110static void *file_ram_alloc(RAMBlock *block,
1111 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001112 const char *path,
1113 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001114{
1115 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001116 char *sanitized_name;
1117 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001118 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001119 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001120 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001121 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001122
Hu Taofc7a5802014-09-09 13:28:01 +08001123 hpagesize = gethugepagesize(path, &local_err);
1124 if (local_err) {
1125 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001126 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001127 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001128 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001129
1130 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001131 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1132 "or larger than huge page size 0x%" PRIx64,
1133 memory, hpagesize);
1134 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001135 }
1136
1137 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001138 error_setg(errp,
1139 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001140 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001141 }
1142
Peter Feiner8ca761f2013-03-04 13:54:25 -05001143 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001144 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001145 for (c = sanitized_name; *c != '\0'; c++) {
 1146        if (*c == '/') {
 1147            *c = '_';
        }
1148 }
1149
1150 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1151 sanitized_name);
1152 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001153
1154 fd = mkstemp(filename);
1155 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001156 error_setg_errno(errp, errno,
1157 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001158 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001159 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001160 }
1161 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001162 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001163
 1164    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
1165
1166 /*
 1167     * ftruncate is not supported by hugetlbfs on older
 1168     * hosts, so don't bother bailing out on errors.
1169 * If anything goes wrong with it under other filesystems,
1170 * mmap will fail.
1171 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001172 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001173 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001174 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001175
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001176 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1177 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1178 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001179 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001180 error_setg_errno(errp, errno,
1181 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001182 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001183 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001184 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001185
1186 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001187 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001188 }
1189
Alex Williamson04b16652010-07-02 11:13:17 -06001190 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001191 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001192
1193error:
1194 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001195 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001196 exit(1);
1197 }
1198 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199}
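
/*
 * A note on the alignment arithmetic above (illustrative, with made-up
 * numbers): hpagesize is a power of two, so
 *
 *     memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
 *
 * rounds the requested size up to the next hugepage boundary.  With a
 * 2 MiB page size, a 5 MiB request becomes 6 MiB, and mmap() therefore
 * always maps a whole number of huge pages.
 */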
1200#endif
1201
Mike Day0dc3f442013-09-05 14:41:35 -04001202/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001203static ram_addr_t find_ram_offset(ram_addr_t size)
1204{
Alex Williamson04b16652010-07-02 11:13:17 -06001205 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001206 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001207
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001208    assert(size != 0); /* it would hand out the same offset multiple times */
1209
Mike Day0dc3f442013-09-05 14:41:35 -04001210 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001211 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001212 }
Alex Williamson04b16652010-07-02 11:13:17 -06001213
Mike Day0dc3f442013-09-05 14:41:35 -04001214 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001215 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001216
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001217 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001218
Mike Day0dc3f442013-09-05 14:41:35 -04001219 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001220 if (next_block->offset >= end) {
1221 next = MIN(next, next_block->offset);
1222 }
1223 }
1224 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001225 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001226 mingap = next - end;
1227 }
1228 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001229
1230 if (offset == RAM_ADDR_MAX) {
1231 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1232 (uint64_t)size);
1233 abort();
1234 }
1235
Alex Williamson04b16652010-07-02 11:13:17 -06001236 return offset;
1237}
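
/*
 * Illustrative sketch (not part of the original file): the best-fit
 * search above can be pictured with two hypothetical blocks occupying
 * [0, 0x1000) and [0x3000, 0x4000).  A request for 0x1000 bytes returns
 * 0x1000, because the 0x2000-byte hole after the first block is the
 * smallest gap that still fits:
 *
 *     offset = find_ram_offset(0x1000);
 *     assert(offset == 0x1000);
 *
 * Real offsets of course depend on the blocks already in ram_list.
 */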
1238
Juan Quintela652d7ec2012-07-20 10:37:54 +02001239ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001240{
Alex Williamsond17b5282010-06-25 11:08:38 -06001241 RAMBlock *block;
1242 ram_addr_t last = 0;
1243
Mike Day0dc3f442013-09-05 14:41:35 -04001244 rcu_read_lock();
1245 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001246 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001247 }
Mike Day0dc3f442013-09-05 14:41:35 -04001248 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001249 return last;
1250}
1251
Jason Baronddb97f12012-08-02 15:44:16 -04001252static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1253{
1254 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001255
1256 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001257 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001258 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1259 if (ret) {
1260 perror("qemu_madvise");
1261 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1262 "but dump_guest_core=off specified\n");
1263 }
1264 }
1265}
1266
Mike Day0dc3f442013-09-05 14:41:35 -04001267/* Called within an RCU critical section, or while the ramlist lock
1268 * is held.
1269 */
Hu Tao20cfe882014-04-02 15:13:26 +08001270static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001271{
Hu Tao20cfe882014-04-02 15:13:26 +08001272 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001273
Mike Day0dc3f442013-09-05 14:41:35 -04001274 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001275 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001276 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001277 }
1278 }
Hu Tao20cfe882014-04-02 15:13:26 +08001279
1280 return NULL;
1281}
1282
Mike Dayae3a7042013-09-05 14:41:35 -04001283/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001284void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1285{
Mike Dayae3a7042013-09-05 14:41:35 -04001286 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001287
Mike Day0dc3f442013-09-05 14:41:35 -04001288 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001289 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001290 assert(new_block);
1291 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001292
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001293 if (dev) {
1294 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001295 if (id) {
1296 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001297 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001298 }
1299 }
1300 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1301
Mike Day0dc3f442013-09-05 14:41:35 -04001302 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001303 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001304 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1305 new_block->idstr);
1306 abort();
1307 }
1308 }
Mike Day0dc3f442013-09-05 14:41:35 -04001309 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001310}
1311
Mike Dayae3a7042013-09-05 14:41:35 -04001312/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001313void qemu_ram_unset_idstr(ram_addr_t addr)
1314{
Mike Dayae3a7042013-09-05 14:41:35 -04001315 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001316
Mike Dayae3a7042013-09-05 14:41:35 -04001317 /* FIXME: arch_init.c assumes that this is not called throughout
1318 * migration. Ignore the problem since hot-unplug during migration
1319 * does not work anyway.
1320 */
1321
Mike Day0dc3f442013-09-05 14:41:35 -04001322 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001323 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001324 if (block) {
1325 memset(block->idstr, 0, sizeof(block->idstr));
1326 }
Mike Day0dc3f442013-09-05 14:41:35 -04001327 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001328}
1329
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001330static int memory_try_enable_merging(void *addr, size_t len)
1331{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001332 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001333 /* disabled by the user */
1334 return 0;
1335 }
1336
1337 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1338}
1339
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001340/* Only legal before the guest might have detected the memory size: e.g. on
 1341 * incoming migration, or right after reset.
 1342 *
 1343 * As the memory core doesn't know how memory is accessed, it is up to the
 1344 * resize callback to update device state and/or add assertions to detect
 1345 * misuse, if necessary.
1346 */
1347int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1348{
1349 RAMBlock *block = find_ram_block(base);
1350
1351 assert(block);
1352
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001353 newsize = TARGET_PAGE_ALIGN(newsize);
1354
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001355 if (block->used_length == newsize) {
1356 return 0;
1357 }
1358
1359 if (!(block->flags & RAM_RESIZEABLE)) {
1360 error_setg_errno(errp, EINVAL,
1361 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1362 " in != 0x" RAM_ADDR_FMT, block->idstr,
1363 newsize, block->used_length);
1364 return -EINVAL;
1365 }
1366
1367 if (block->max_length < newsize) {
1368 error_setg_errno(errp, EINVAL,
1369 "Length too large: %s: 0x" RAM_ADDR_FMT
1370 " > 0x" RAM_ADDR_FMT, block->idstr,
1371 newsize, block->max_length);
1372 return -EINVAL;
1373 }
1374
1375 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1376 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001377 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1378 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001379 memory_region_set_size(block->mr, newsize);
1380 if (block->resized) {
1381 block->resized(block->idstr, newsize, block->host);
1382 }
1383 return 0;
1384}
1385
Hu Taoef701d72014-09-09 13:27:54 +08001386static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001387{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001388 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001389 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001390 ram_addr_t old_ram_size, new_ram_size;
1391
1392 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001393
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001394 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001395 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001396
1397 if (!new_block->host) {
1398 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001399 xen_ram_alloc(new_block->offset, new_block->max_length,
1400 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001401 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001402 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001403 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001404 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001405 error_setg_errno(errp, errno,
1406 "cannot set up guest memory '%s'",
1407 memory_region_name(new_block->mr));
1408 qemu_mutex_unlock_ramlist();
1409 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001410 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001411 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001412 }
1413 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001414
Li Zhijiandd631692015-07-02 20:18:06 +08001415 new_ram_size = MAX(old_ram_size,
1416 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1417 if (new_ram_size > old_ram_size) {
1418 migration_bitmap_extend(old_ram_size, new_ram_size);
1419 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001420 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1421 * QLIST (which has an RCU-friendly variant) does not have insertion at
1422 * tail, so save the last element in last_block.
1423 */
Mike Day0dc3f442013-09-05 14:41:35 -04001424 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001425 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001426 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001427 break;
1428 }
1429 }
1430 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001431 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001432 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001433 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001434 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001435 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001436 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001437 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001438
Mike Day0dc3f442013-09-05 14:41:35 -04001439 /* Write list before version */
1440 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001441 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001442 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001443
Juan Quintela2152f5c2013-10-08 13:52:02 +02001444 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1445
1446 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001447 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001448
1449 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001450 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1451 ram_list.dirty_memory[i] =
1452 bitmap_zero_extend(ram_list.dirty_memory[i],
1453 old_ram_size, new_ram_size);
1454 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001455 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001456 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001457 new_block->used_length,
1458 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001459
Paolo Bonzinia904c912015-01-21 16:18:35 +01001460 if (new_block->host) {
1461 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1462 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1463 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1464 if (kvm_enabled()) {
1465 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1466 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001467 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001468
1469 return new_block->offset;
1470}
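
/*
 * Illustrative note (not part of the original file): the size-ordered
 * insertion above keeps lookups cheap, because qemu_get_ram_block()
 * scans the list front to back and the biggest (statistically hottest)
 * blocks are met first.  With hypothetical blocks of 4 GiB, 128 MiB and
 * 64 KiB the list reads
 *
 *     ram_list.blocks:  [4 GiB] -> [128 MiB] -> [64 KiB]
 *
 * and a freshly added 256 MiB block is linked in between the first two.
 */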
1471
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001472#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001473ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001474 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001475 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001476{
1477 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001478 ram_addr_t addr;
1479 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001480
1481 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001482 error_setg(errp, "-mem-path not supported with Xen");
1483 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001484 }
1485
1486 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1487 /*
1488 * file_ram_alloc() needs to allocate just like
1489 * phys_mem_alloc, but we haven't bothered to provide
1490 * a hook there.
1491 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001492 error_setg(errp,
1493 "-mem-path not supported with this accelerator");
1494 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001495 }
1496
1497 size = TARGET_PAGE_ALIGN(size);
1498 new_block = g_malloc0(sizeof(*new_block));
1499 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001500 new_block->used_length = size;
1501 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001502 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001503 new_block->host = file_ram_alloc(new_block, size,
1504 mem_path, errp);
1505 if (!new_block->host) {
1506 g_free(new_block);
1507 return -1;
1508 }
1509
Hu Taoef701d72014-09-09 13:27:54 +08001510 addr = ram_block_add(new_block, &local_err);
1511 if (local_err) {
1512 g_free(new_block);
1513 error_propagate(errp, local_err);
1514 return -1;
1515 }
1516 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001517}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001518#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001519
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001520static
1521ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1522 void (*resized)(const char*,
1523 uint64_t length,
1524 void *host),
1525 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001526 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001527{
1528 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001529 ram_addr_t addr;
1530 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001531
1532 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001533 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001534 new_block = g_malloc0(sizeof(*new_block));
1535 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001536 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001537 new_block->used_length = size;
1538 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001539 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001540 new_block->fd = -1;
1541 new_block->host = host;
1542 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001543 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001544 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001545 if (resizeable) {
1546 new_block->flags |= RAM_RESIZEABLE;
1547 }
Hu Taoef701d72014-09-09 13:27:54 +08001548 addr = ram_block_add(new_block, &local_err);
1549 if (local_err) {
1550 g_free(new_block);
1551 error_propagate(errp, local_err);
1552 return -1;
1553 }
1554 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001555}
1556
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001557ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1558 MemoryRegion *mr, Error **errp)
1559{
1560 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1561}
1562
Hu Taoef701d72014-09-09 13:27:54 +08001563ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001564{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001565 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1566}
1567
1568ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1569 void (*resized)(const char*,
1570 uint64_t length,
1571 void *host),
1572 MemoryRegion *mr, Error **errp)
1573{
1574 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001575}
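
/*
 * Minimal usage sketch (not part of the original file; the callback and
 * sizes are hypothetical): a device that wants RAM it can grow later,
 * e.g. on incoming migration, pairs qemu_ram_alloc_resizeable() with
 * qemu_ram_resize():
 *
 *     static void my_dev_ram_resized(const char *id, uint64_t length,
 *                                    void *host)
 *     {
 *         // refresh any device state that caches the block's length
 *     }
 *
 *     Error *err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc_resizeable(4096, 16 * 4096,
 *                                                 my_dev_ram_resized,
 *                                                 mr, &err);
 *     ...
 *     qemu_ram_resize(addr, 8 * 4096, &err);    grows used_length to 32K
 */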
bellarde9a1ab12007-02-08 23:08:38 +00001576
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001577void qemu_ram_free_from_ptr(ram_addr_t addr)
1578{
1579 RAMBlock *block;
1580
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001581 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001582 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001583 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001584 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001585 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001586 /* Write list before version */
1587 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001588 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001589 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001590 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001591 }
1592 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001593 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001594}
1595
Paolo Bonzini43771532013-09-09 17:58:40 +02001596static void reclaim_ramblock(RAMBlock *block)
1597{
1598 if (block->flags & RAM_PREALLOC) {
1599 ;
1600 } else if (xen_enabled()) {
1601 xen_invalidate_map_cache_entry(block->host);
1602#ifndef _WIN32
1603 } else if (block->fd >= 0) {
1604 munmap(block->host, block->max_length);
1605 close(block->fd);
1606#endif
1607 } else {
1608 qemu_anon_ram_free(block->host, block->max_length);
1609 }
1610 g_free(block);
1611}
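
/*
 * A minimal sketch of the RCU pattern used by the free paths around here
 * (assuming `rcu` is the rcu_head embedded in RAMBlock, as in this file):
 * readers walk the list under rcu_read_lock(), so a block is unlinked
 * first and reclaimed only after every in-flight reader has finished:
 *
 *     QLIST_REMOVE_RCU(block, next);            unlink; readers may still see it
 *     smp_wmb();                                publish list before version
 *     ram_list.version++;
 *     call_rcu(block, reclaim_ramblock, rcu);   freed after a grace period
 */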
1612
Anthony Liguoric227f092009-10-01 16:12:16 -05001613void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001614{
Alex Williamson04b16652010-07-02 11:13:17 -06001615 RAMBlock *block;
1616
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001617 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001618 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001619 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001620 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001621 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001622 /* Write list before version */
1623 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001624 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001625 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001626 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001627 }
1628 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001629 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001630}
1631
Huang Yingcd19cfa2011-03-02 08:56:19 +01001632#ifndef _WIN32
1633void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1634{
1635 RAMBlock *block;
1636 ram_addr_t offset;
1637 int flags;
1638 void *area, *vaddr;
1639
Mike Day0dc3f442013-09-05 14:41:35 -04001640 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001641 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001642 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001643 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001644 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001645 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001646 } else if (xen_enabled()) {
1647 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001648 } else {
1649 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001650 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001651 flags |= (block->flags & RAM_SHARED ?
1652 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001653 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1654 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001655 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001656 /*
1657 * Remap needs to match alloc. Accelerators that
1658 * set phys_mem_alloc never remap. If they did,
1659 * we'd need a remap hook here.
1660 */
1661 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1662
Huang Yingcd19cfa2011-03-02 08:56:19 +01001663 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1664 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1665 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001666 }
1667 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001668 fprintf(stderr, "Could not remap addr: "
1669 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001670 length, addr);
1671 exit(1);
1672 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001673 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001674 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001675 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001676 }
1677 }
1678}
1679#endif /* !_WIN32 */
1680
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001681int qemu_get_ram_fd(ram_addr_t addr)
1682{
Mike Dayae3a7042013-09-05 14:41:35 -04001683 RAMBlock *block;
1684 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001685
Mike Day0dc3f442013-09-05 14:41:35 -04001686 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001687 block = qemu_get_ram_block(addr);
1688 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001689 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001690 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001691}
1692
Damjan Marion3fd74b82014-06-26 23:01:32 +02001693void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1694{
Mike Dayae3a7042013-09-05 14:41:35 -04001695 RAMBlock *block;
1696 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001697
Mike Day0dc3f442013-09-05 14:41:35 -04001698 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001699 block = qemu_get_ram_block(addr);
1700 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001701 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001702 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001703}
1704
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001705/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001706 * This should not be used for general purpose DMA. Use address_space_map
1707 * or address_space_rw instead. For local memory (e.g. video ram) that the
1708 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001709 *
1710 * By the time this function returns, the returned pointer is not protected
1711 * by RCU anymore. If the caller is not within an RCU critical section and
1712 * does not hold the iothread lock, it must have other means of protecting the
1713 * pointer, such as a reference to the region that includes the incoming
1714 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001715 */
1716void *qemu_get_ram_ptr(ram_addr_t addr)
1717{
Mike Dayae3a7042013-09-05 14:41:35 -04001718 RAMBlock *block;
1719 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001720
Mike Day0dc3f442013-09-05 14:41:35 -04001721 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001722 block = qemu_get_ram_block(addr);
1723
1724 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001725 /* We need to check if the requested address is in the RAM
1726 * because we don't want to map the entire memory in QEMU.
1727 * In that case just map until the end of the page.
1728 */
1729 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001730 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001731 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001732 }
Mike Dayae3a7042013-09-05 14:41:35 -04001733
1734 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001735 }
Mike Dayae3a7042013-09-05 14:41:35 -04001736 ptr = ramblock_ptr(block, addr - block->offset);
1737
Mike Day0dc3f442013-09-05 14:41:35 -04001738unlock:
1739 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001740 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001741}
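
/*
 * Illustrative sketch (not part of the original file): a caller that is
 * not already inside an RCU critical section and only needs the pointer
 * briefly can take one itself, as the comment above suggests:
 *
 *     rcu_read_lock();
 *     void *host = qemu_get_ram_ptr(ram_addr);
 *     memcpy(host, buf, len);                   hypothetical short access
 *     rcu_read_unlock();
 *
 * Keeping a reference to the owning MemoryRegion is the alternative when
 * the pointer has to outlive the critical section.
 */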
1742
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001743/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001744 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001745 *
1746 * By the time this function returns, the returned pointer is not protected
1747 * by RCU anymore. If the caller is not within an RCU critical section and
1748 * does not hold the iothread lock, it must have other means of protecting the
1749 * pointer, such as a reference to the region that includes the incoming
1750 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001751 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001752static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001753{
Mike Dayae3a7042013-09-05 14:41:35 -04001754 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001755 if (*size == 0) {
1756 return NULL;
1757 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001758 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001759 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001760 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001761 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001762 rcu_read_lock();
1763 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001764 if (addr - block->offset < block->max_length) {
 1765            if (addr - block->offset + *size > block->max_length) {
 1766                *size = block->max_length - addr + block->offset;
            }
Mike Dayae3a7042013-09-05 14:41:35 -04001767 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001768 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001769 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001770 }
1771 }
1772
1773 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1774 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001775 }
1776}
1777
Paolo Bonzini7443b432013-06-03 12:44:02 +02001778/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001779 * (typically a TLB entry) back to a ram offset.
1780 *
1781 * By the time this function returns, the returned pointer is not protected
1782 * by RCU anymore. If the caller is not within an RCU critical section and
1783 * does not hold the iothread lock, it must have other means of protecting the
1784 * pointer, such as a reference to the region that includes the incoming
1785 * ram_addr_t.
1786 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001787MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001788{
pbrook94a6b542009-04-11 17:15:54 +00001789 RAMBlock *block;
1790 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001791 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001792
Jan Kiszka868bb332011-06-21 22:59:09 +02001793 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001794 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001795 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001796 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001797 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001798 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001799 }
1800
Mike Day0dc3f442013-09-05 14:41:35 -04001801 rcu_read_lock();
1802 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001803 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001804 goto found;
1805 }
1806
Mike Day0dc3f442013-09-05 14:41:35 -04001807 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001808        /* This case appears when the block is not mapped. */
1809 if (block->host == NULL) {
1810 continue;
1811 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001812 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001813 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001814 }
pbrook94a6b542009-04-11 17:15:54 +00001815 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001816
Mike Day0dc3f442013-09-05 14:41:35 -04001817 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001818 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001819
1820found:
1821 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001822 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001823 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001824 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001825}
Alex Williamsonf471a172010-06-11 11:11:42 -06001826
Avi Kivitya8170e52012-10-23 12:30:10 +02001827static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001828 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001829{
Juan Quintela52159192013-10-08 12:44:04 +02001830 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001831 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001832 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001833 switch (size) {
1834 case 1:
1835 stb_p(qemu_get_ram_ptr(ram_addr), val);
1836 break;
1837 case 2:
1838 stw_p(qemu_get_ram_ptr(ram_addr), val);
1839 break;
1840 case 4:
1841 stl_p(qemu_get_ram_ptr(ram_addr), val);
1842 break;
1843 default:
1844 abort();
1845 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001846 /* Set both VGA and migration bits for simplicity and to remove
1847 * the notdirty callback faster.
1848 */
1849 cpu_physical_memory_set_dirty_range(ram_addr, size,
1850 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001851 /* we remove the notdirty callback only if the code has been
1852 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001853 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001854 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001855 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001856 }
bellard1ccde1c2004-02-06 19:46:14 +00001857}
1858
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001859static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1860 unsigned size, bool is_write)
1861{
1862 return is_write;
1863}
1864
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001865static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001866 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001867 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001868 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001869};
1870
pbrook0f459d12008-06-09 00:20:13 +00001871/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001872static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001873{
Andreas Färber93afead2013-08-26 03:41:01 +02001874 CPUState *cpu = current_cpu;
1875 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001876 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001877 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001878 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001879 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001880
Andreas Färberff4700b2013-08-26 18:23:18 +02001881 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001882 /* We re-entered the check after replacing the TB. Now raise
 1883         * the debug interrupt so that it will trigger after the
1884 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001885 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001886 return;
1887 }
Andreas Färber93afead2013-08-26 03:41:01 +02001888 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001889 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001890 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1891 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001892 if (flags == BP_MEM_READ) {
1893 wp->flags |= BP_WATCHPOINT_HIT_READ;
1894 } else {
1895 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1896 }
1897 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001898 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001899 if (!cpu->watchpoint_hit) {
1900 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001901 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001902 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001903 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001904 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001905 } else {
1906 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001907 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001908 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001909 }
aliguori06d55cc2008-11-18 20:24:06 +00001910 }
aliguori6e140f22008-11-18 20:37:55 +00001911 } else {
1912 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001913 }
1914 }
1915}
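
/*
 * Illustrative sketch (not part of the original file): the path above is
 * reached for addresses registered with cpu_watchpoint_insert(), e.g. a
 * hypothetical 4-byte write watch:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(cpu, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * A guest store into [vaddr, vaddr + 4) then enters check_watchpoint()
 * with flags == BP_MEM_WRITE, records the hit in wp->flags/wp->hitaddr,
 * and interrupts execution so the debugger can report it.
 */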
1916
pbrook6658ffb2007-03-16 23:58:11 +00001917/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
 1918   so these check for a hit, then pass through to the normal out-of-line
1919 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001920static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1921 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001922{
Peter Maydell66b9b432015-04-26 16:49:24 +01001923 MemTxResult res;
1924 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001925
Peter Maydell66b9b432015-04-26 16:49:24 +01001926 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001927 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001928 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01001929 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001930 break;
1931 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01001932 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001933 break;
1934 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01001935 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001936 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001937 default: abort();
1938 }
Peter Maydell66b9b432015-04-26 16:49:24 +01001939 *pdata = data;
1940 return res;
1941}
1942
1943static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1944 uint64_t val, unsigned size,
1945 MemTxAttrs attrs)
1946{
1947 MemTxResult res;
1948
1949 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1950 switch (size) {
1951 case 1:
1952 address_space_stb(&address_space_memory, addr, val, attrs, &res);
1953 break;
1954 case 2:
1955 address_space_stw(&address_space_memory, addr, val, attrs, &res);
1956 break;
1957 case 4:
1958 address_space_stl(&address_space_memory, addr, val, attrs, &res);
1959 break;
1960 default: abort();
1961 }
1962 return res;
pbrook6658ffb2007-03-16 23:58:11 +00001963}
1964
Avi Kivity1ec9b902012-01-02 12:47:48 +02001965static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01001966 .read_with_attrs = watch_mem_read,
1967 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001968 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001969};
pbrook6658ffb2007-03-16 23:58:11 +00001970
Peter Maydellf25a49e2015-04-26 16:49:24 +01001971static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1972 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001973{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001974 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001975 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01001976 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001977
blueswir1db7b5422007-05-26 17:36:03 +00001978#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001979 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001980 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001981#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01001982 res = address_space_read(subpage->as, addr + subpage->base,
1983 attrs, buf, len);
1984 if (res) {
1985 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01001986 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001987 switch (len) {
1988 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001989 *data = ldub_p(buf);
1990 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001991 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001992 *data = lduw_p(buf);
1993 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001994 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001995 *data = ldl_p(buf);
1996 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001997 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001998 *data = ldq_p(buf);
1999 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002000 default:
2001 abort();
2002 }
blueswir1db7b5422007-05-26 17:36:03 +00002003}
2004
Peter Maydellf25a49e2015-04-26 16:49:24 +01002005static MemTxResult subpage_write(void *opaque, hwaddr addr,
2006 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002007{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002008 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002009 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002010
blueswir1db7b5422007-05-26 17:36:03 +00002011#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002012 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002013 " value %"PRIx64"\n",
2014 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002015#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002016 switch (len) {
2017 case 1:
2018 stb_p(buf, value);
2019 break;
2020 case 2:
2021 stw_p(buf, value);
2022 break;
2023 case 4:
2024 stl_p(buf, value);
2025 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002026 case 8:
2027 stq_p(buf, value);
2028 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002029 default:
2030 abort();
2031 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002032 return address_space_write(subpage->as, addr + subpage->base,
2033 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002034}
2035
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002036static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002037 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002038{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002039 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002040#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002041 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002042 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002043#endif
2044
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002045 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002046 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002047}
2048
Avi Kivity70c68e42012-01-02 12:32:48 +02002049static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002050 .read_with_attrs = subpage_read,
2051 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002052 .impl.min_access_size = 1,
2053 .impl.max_access_size = 8,
2054 .valid.min_access_size = 1,
2055 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002056 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002057 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002058};
2059
Anthony Liguoric227f092009-10-01 16:12:16 -05002060static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002061 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002062{
2063 int idx, eidx;
2064
2065 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2066 return -1;
2067 idx = SUBPAGE_IDX(start);
2068 eidx = SUBPAGE_IDX(end);
2069#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002070 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2071 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002072#endif
blueswir1db7b5422007-05-26 17:36:03 +00002073 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002074 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002075 }
2076
2077 return 0;
2078}
2079
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002080static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002081{
Anthony Liguoric227f092009-10-01 16:12:16 -05002082 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002083
Anthony Liguori7267c092011-08-20 22:09:37 -05002084 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002085
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002086 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002087 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002088 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002089 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002090 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002091#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002092 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2093 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002094#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002095 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002096
2097 return mmio;
2098}
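
/*
 * A minimal sketch of how subpages are used (the offsets and section
 * indices below are hypothetical): when two sections share one target
 * page, e.g. a 512-byte device window at offset 0x200 of an otherwise
 * RAM-backed page, the page's dispatch entry points at a subpage_t and
 * the ranges are split:
 *
 *     subpage_t *sub = subpage_init(as, page_base);
 *     subpage_register(sub, 0x000, 0x1ff, ram_section);
 *     subpage_register(sub, 0x200, 0x3ff, mmio_section);
 *     subpage_register(sub, 0x400, TARGET_PAGE_SIZE - 1, ram_section);
 *
 * subpage_read()/subpage_write() then forward each access to the section
 * owning its offset.
 */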
2099
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002100static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2101 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002102{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002103 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002104 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002105 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002106 .mr = mr,
2107 .offset_within_address_space = 0,
2108 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002109 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002110 };
2111
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002112 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002113}
2114
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002115MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002116{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002117 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2118 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002119
2120 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002121}
2122
Avi Kivitye9179ce2009-06-14 11:38:52 +03002123static void io_mem_init(void)
2124{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002125 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002126 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002127 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002128 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002129 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002130 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002131 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002132}
2133
Avi Kivityac1970f2012-10-03 16:22:53 +02002134static void mem_begin(MemoryListener *listener)
2135{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002136 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002137 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2138 uint16_t n;
2139
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002140 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002141 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002142 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002143 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002144 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002145 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002146 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002147 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002148
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002149 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002150 d->as = as;
2151 as->next_dispatch = d;
2152}
2153
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002154static void address_space_dispatch_free(AddressSpaceDispatch *d)
2155{
2156 phys_sections_free(&d->map);
2157 g_free(d);
2158}
2159
Paolo Bonzini00752702013-05-29 12:13:54 +02002160static void mem_commit(MemoryListener *listener)
2161{
2162 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002163 AddressSpaceDispatch *cur = as->dispatch;
2164 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002165
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002166 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002167
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002168 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002169 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002170 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002171 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002172}
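
/*
 * Sketch of the reader side that pairs with the atomic_rcu_set() above
 * (a plausible caller pattern, mirroring the rest of this file):
 *
 *     rcu_read_lock();
 *     AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
 *     ... translate the access through d->phys_map ...
 *     rcu_read_unlock();
 *
 * An old dispatch table stays valid until every such reader is done,
 * after which call_rcu() runs address_space_dispatch_free() on it.
 */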
2173
Avi Kivity1d711482012-10-02 18:54:45 +02002174static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002175{
Andreas Färber182735e2013-05-29 22:29:20 +02002176 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002177
2178 /* since each CPU stores ram addresses in its TLB cache, we must
2179 reset the modified entries */
 2180    /* XXX: slow! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002181 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002182 /* FIXME: Disentangle the cpu.h circular files deps so we can
2183 directly get the right CPU from listener. */
2184 if (cpu->tcg_as_listener != listener) {
2185 continue;
2186 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002187 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002188 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002189}
2190
Avi Kivityac1970f2012-10-03 16:22:53 +02002191void address_space_init_dispatch(AddressSpace *as)
2192{
Paolo Bonzini00752702013-05-29 12:13:54 +02002193 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002194 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002195 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002196 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002197 .region_add = mem_add,
2198 .region_nop = mem_add,
2199 .priority = 0,
2200 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002201 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002202}
2203
void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

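/*
 * Illustrative sketch (not part of the original file): how a board model
 * typically plugs RAM into the region returned by get_system_memory().
 * The region name, base and size are invented for the example, and
 * &error_abort assumes qapi/error.h is available; treat the exact
 * memory_region_init_ram() signature as an assumption of this sketch.
 */
static void example_map_board_ram(uint64_t base, uint64_t size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", size, &error_abort);
    memory_region_add_subregion(get_system_memory(), base, ram);
}
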
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}

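/*
 * Worked example (added for illustration): splitting a 7-byte access at
 * address 0x1002 against a region whose valid.max_access_size is 4 and
 * which does not allow unaligned accesses.
 *
 *   addr & -addr = 0x1002 & -0x1002 = 2   -> alignment caps the size at 2
 *   l = 7 > 2                             -> clamp l to 2
 *   2 is already a power of two           -> no qemu_fls() rounding needed
 *
 * The caller therefore issues a 2-byte access first; the remaining five
 * bytes are carved up on the next iterations of the dispatch loop.
 */
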
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}

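/*
 * Illustrative sketch (not part of the original file): a device model
 * doing DMA through the API above and checking the transaction result.
 * The descriptor size, helper name and address are invented for the
 * example.
 */
static bool example_dma_read_descriptor(AddressSpace *as, hwaddr desc_addr)
{
    uint8_t desc[16];
    MemTxResult res;

    res = address_space_read(as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                             desc, sizeof(desc));
    if (res != MEMTX_OK) {
        /* Decode error or device error reported by the responder. */
        return false;
    }
    /* ... parse desc[] ... */
    return true;
}
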
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

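/*
 * Illustrative sketch (not part of the original file): loading a firmware
 * blob at reset, the typical caller of cpu_physical_memory_write_rom().
 * The helper name, blob and base address are invented for the example.
 */
static void example_load_firmware(const uint8_t *blob, int size, hwaddr base)
{
    /* Bypasses the read-only protection that address_space_write() would
     * honour, and invalidates any TBs built from the old contents. */
    cpu_physical_memory_write_rom(&address_space_memory, base, blob, size);
}
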
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

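/*
 * Illustrative sketch (not part of the original file): how a DMA helper
 * can wait for the single bounce buffer to become free again. The device
 * structure, field and callback names are hypothetical.
 */
typedef struct ExampleDev {
    QEMUBH *retry_bh;           /* assumed zero-initialized */
} ExampleDev;

static void example_dma_retry_bh(void *opaque)
{
    /* The bounce buffer was released; retry address_space_map() here. */
}

static void example_dma_map_failed(ExampleDev *d)
{
    if (!d->retry_bh) {
        d->retry_bh = qemu_bh_new(example_dma_retry_bh, d);
    }
    /* Schedules retry_bh as soon as the bounce buffer is free. */
    cpu_register_map_client(d->retry_bh);
}
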
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* don't leak the RCU read lock on the error path */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

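/*
 * Illustrative sketch (not part of the original file): the canonical
 * map/use/unmap pattern for zero-copy DMA, falling back to
 * address_space_write() when the mapping comes back short or fails.
 * The helper name and buffer are invented for the example.
 */
static bool example_dma_write_buf(AddressSpace *as, hwaddr addr,
                                  const uint8_t *data, int size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host || plen < size) {
        if (host) {
            address_space_unmap(as, host, plen, true, 0);
        }
        /* Slow path: copy through the dispatch loop instead. */
        return address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                                   data, size) == MEMTX_OK;
    }

    memcpy(host, data, size);
    /* access_len == size marks the written range dirty on unmap. */
    address_space_unmap(as, host, plen, true, size);
    return true;
}
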
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

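/*
 * Illustrative sketch (not part of the original file): reading a 32-bit
 * little-endian field of an in-guest structure with the helpers above.
 * The helper name and the zero fallback are invented for the example.
 */
static uint32_t example_read_le32_field(AddressSpace *as, hwaddr field_addr)
{
    MemTxResult res;
    uint32_t v = address_space_ldl_le(as, field_addr,
                                      MEMTXATTRS_UNSPECIFIED, &res);

    return res == MEMTX_OK ? v : 0;
}
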
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

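/*
 * Illustrative sketch (not part of the original file): a software-walked
 * MMU setting an "accessed" bit in a guest page-table entry. The
 * _notdirty store skips the DIRTY_MEMORY_CODE bookkeeping, so translated
 * blocks covering the page-table page are not invalidated. The PTE
 * layout and bit position are invented for the example.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    pte |= (1u << 5);                   /* hypothetical accessed bit */
    stl_phys_notdirty(as, pte_addr, pte);
}
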
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

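/*
 * Illustrative sketch (not part of the original file): how a debugger
 * front end such as the gdbstub peeks at guest virtual memory.
 * cpu_memory_rw_debug() walks the guest page tables via
 * cpu_get_phys_page_debug(), so it works even with a cold TLB. The
 * helper name and the zero fallback are invented for the example.
 */
static uint32_t example_debug_peek_u32(CPUState *cpu, target_ulong vaddr)
{
    uint8_t bytes[4] = { 0 };

    if (cpu_memory_rw_debug(cpu, vaddr, bytes, sizeof(bytes), 0) < 0) {
        return 0;   /* unmapped; real callers should report the error */
    }
    return ldl_p(bytes);    /* target-endian load from the byte buffer */
}
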
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
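
/*
 * Illustrative sketch (not part of the original file): a RAMBlockIterFunc
 * callback that totals guest RAM, e.g. for a migration size estimate.
 * The parameter list follows the call above; the exact typedef lives in
 * the cpu-common header, and the helper names are invented here.
 */
static int example_sum_block(const char *idstr, void *host_addr,
                             ram_addr_t offset, ram_addr_t length,
                             void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;               /* non-zero would stop the iteration early */
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}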
#endif