/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
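
/* For example, with 4 KiB target pages (TARGET_PAGE_BITS == 12) this gives
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6: the physical page map is a
 * radix tree of up to six levels, each level indexed by a 9-bit
 * (P_L2_SIZE == 512 entry) slice of the physical page number. */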

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
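
/* A freshly allocated node starts out with every entry set to skip = 1 and
 * ptr = PHYS_MAP_NODE_NIL, i.e. "one level down, nothing mapped yet";
 * phys_page_set_level() below then turns entries into leaves or chains
 * further nodes as ranges are registered. */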

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
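
/* phys_page_set() maps the nb pages starting at page index 'index' onto the
 * section numbered 'leaf'.  Descending from the top level, an entry becomes a
 * leaf whenever the remaining range covers a whole aligned block at that
 * level; otherwise the walk recurses one level down. */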

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
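
/* After compaction, a chain of single-child nodes collapses into one entry
 * whose skip field is the sum of the skipped levels, so a lookup can jump
 * several levels at once (see the "i -= lp.skip" step in phys_page_find()
 * below). */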

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
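
/* The lookup mirrors phys_page_set(): starting at the root with
 * i = P_L2_LEVELS, each iteration consumes lp.skip levels and selects the
 * next entry using a P_L2_BITS-wide slice of the page index; the final ptr
 * indexes sections[] (or PHYS_SECTION_UNASSIGNED when the address falls
 * outside the section stored there). */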

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    rcu_read_lock();
    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    rcu_read_unlock();
    return mr;
}
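
/* The loop above may hop across address spaces each time an IOMMU region is
 * traversed (as = iotlb.target_as), and both len and *plen are only ever
 * shrunk, so the returned window never spans more than one MemoryRegion or
 * IOMMU mapping. */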

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
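
/* Worked example of the overlap test above: a watchpoint covering
 * [0x1000, 0x1007] (vaddr 0x1000, len 8) matches an access of length 2 at
 * 0x1004 because 0x1004 <= 0x1007 and 0x1000 <= 0x1005, but not an access
 * starting at 0x1008, which lies entirely above the watchpoint. */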
694
Paul Brookc527ee82010-03-01 03:31:14 +0000695#endif
aliguoria1d1bb32008-11-18 20:07:32 +0000696
697/* Add a breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200698int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +0000699 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000700{
aliguoric0ce9982008-11-25 22:13:57 +0000701 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +0000702
Anthony Liguori7267c092011-08-20 22:09:37 -0500703 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +0000704
705 bp->pc = pc;
706 bp->flags = flags;
707
aliguori2dc9f412008-11-18 20:56:59 +0000708 /* keep all GDB-injected breakpoints in front */
Andreas Färber00b941e2013-06-29 18:55:54 +0200709 if (flags & BP_GDB) {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200710 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200711 } else {
Andreas Färberf0c3c502013-08-26 21:22:53 +0200712 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
Andreas Färber00b941e2013-06-29 18:55:54 +0200713 }
aliguoria1d1bb32008-11-18 20:07:32 +0000714
Andreas Färberf0c3c502013-08-26 21:22:53 +0200715 breakpoint_invalidate(cpu, pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000716
Andreas Färber00b941e2013-06-29 18:55:54 +0200717 if (breakpoint) {
aliguoria1d1bb32008-11-18 20:07:32 +0000718 *breakpoint = bp;
Andreas Färber00b941e2013-06-29 18:55:54 +0200719 }
aliguoria1d1bb32008-11-18 20:07:32 +0000720 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000721}
722
723/* Remove a specific breakpoint. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200724int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +0000725{
aliguoria1d1bb32008-11-18 20:07:32 +0000726 CPUBreakpoint *bp;
727
Andreas Färberf0c3c502013-08-26 21:22:53 +0200728 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +0000729 if (bp->pc == pc && bp->flags == flags) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200730 cpu_breakpoint_remove_by_ref(cpu, bp);
bellard4c3a88a2003-07-26 12:06:08 +0000731 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000732 }
bellard4c3a88a2003-07-26 12:06:08 +0000733 }
aliguoria1d1bb32008-11-18 20:07:32 +0000734 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +0000735}
736
aliguoria1d1bb32008-11-18 20:07:32 +0000737/* Remove a specific breakpoint by reference. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200738void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000739{
Andreas Färberf0c3c502013-08-26 21:22:53 +0200740 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
741
742 breakpoint_invalidate(cpu, breakpoint->pc);
aliguoria1d1bb32008-11-18 20:07:32 +0000743
Anthony Liguori7267c092011-08-20 22:09:37 -0500744 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +0000745}
746
747/* Remove all matching breakpoints. */
Andreas Färberb3310ab2013-09-02 17:26:20 +0200748void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000749{
aliguoric0ce9982008-11-25 22:13:57 +0000750 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000751
Andreas Färberf0c3c502013-08-26 21:22:53 +0200752 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
Andreas Färberb3310ab2013-09-02 17:26:20 +0200753 if (bp->flags & mask) {
754 cpu_breakpoint_remove_by_ref(cpu, bp);
755 }
aliguoric0ce9982008-11-25 22:13:57 +0000756 }
bellard4c3a88a2003-07-26 12:06:08 +0000757}
758
bellardc33a3462003-07-29 20:50:33 +0000759/* enable or disable single step mode. EXCP_DEBUG is returned by the
760 CPU loop after each instruction */
Andreas Färber3825b282013-06-24 18:41:06 +0200761void cpu_single_step(CPUState *cpu, int enabled)
bellardc33a3462003-07-29 20:50:33 +0000762{
Andreas Färbered2803d2013-06-21 20:20:45 +0200763 if (cpu->singlestep_enabled != enabled) {
764 cpu->singlestep_enabled = enabled;
765 if (kvm_enabled()) {
Stefan Weil38e478e2013-07-25 20:50:21 +0200766 kvm_update_guest_debug(cpu, 0);
Andreas Färbered2803d2013-06-21 20:20:45 +0200767 } else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100768 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +0000769 /* XXX: only flush what is necessary */
Stefan Weil38e478e2013-07-25 20:50:21 +0200770 CPUArchState *env = cpu->env_ptr;
aliguorie22a25c2009-03-12 20:12:48 +0000771 tb_flush(env);
772 }
bellardc33a3462003-07-29 20:50:33 +0000773 }
bellardc33a3462003-07-29 20:50:33 +0000774}
775
Andreas Färbera47dddd2013-09-03 17:38:47 +0200776void cpu_abort(CPUState *cpu, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +0000777{
778 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +0000779 va_list ap2;
bellard75012672003-06-21 13:11:07 +0000780
781 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +0000782 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +0000783 fprintf(stderr, "qemu: fatal: ");
784 vfprintf(stderr, fmt, ap);
785 fprintf(stderr, "\n");
Andreas Färber878096e2013-05-27 01:33:50 +0200786 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +0000787 if (qemu_log_enabled()) {
788 qemu_log("qemu: fatal: ");
789 qemu_log_vprintf(fmt, ap2);
790 qemu_log("\n");
Andreas Färbera0762852013-06-16 07:28:50 +0200791 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +0000792 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +0000793 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +0000794 }
pbrook493ae1f2007-11-23 16:53:59 +0000795 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +0000796 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +0200797#if defined(CONFIG_USER_ONLY)
798 {
799 struct sigaction act;
800 sigfillset(&act.sa_mask);
801 act.sa_handler = SIG_DFL;
802 sigaction(SIGABRT, &act, NULL);
803 }
804#endif
bellard75012672003-06-21 13:11:07 +0000805 abort();
806}
807
bellard01243112004-01-04 15:48:17 +0000808#if !defined(CONFIG_USER_ONLY)
Paolo Bonzini041603f2013-09-09 17:49:45 +0200809static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
810{
811 RAMBlock *block;
812
813 /* The list is protected by the iothread lock here. */
814 block = ram_list.mru_block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +0200815 if (block && addr - block->offset < block->max_length) {
Paolo Bonzini041603f2013-09-09 17:49:45 +0200816 goto found;
817 }
818 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +0200819 if (addr - block->offset < block->max_length) {
Paolo Bonzini041603f2013-09-09 17:49:45 +0200820 goto found;
821 }
822 }
823
824 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
825 abort();
826
827found:
828 ram_list.mru_block = block;
829 return block;
830}
831
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200832static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
bellard1ccde1c2004-02-06 19:46:14 +0000833{
Paolo Bonzini041603f2013-09-09 17:49:45 +0200834 ram_addr_t start1;
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200835 RAMBlock *block;
836 ram_addr_t end;
837
838 end = TARGET_PAGE_ALIGN(start + length);
839 start &= TARGET_PAGE_MASK;
bellardf23db162005-08-21 19:12:28 +0000840
Paolo Bonzini041603f2013-09-09 17:49:45 +0200841 block = qemu_get_ram_block(start);
842 assert(block == qemu_get_ram_block(end - 1));
Michael S. Tsirkin1240be22014-11-12 11:44:41 +0200843 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
Blue Swirle5548612012-04-21 13:08:33 +0000844 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +0200845}
846
847/* Note: start and end must be within the same ram block. */
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200848void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
Juan Quintela52159192013-10-08 12:44:04 +0200849 unsigned client)
Juan Quintelad24981d2012-05-22 00:42:40 +0200850{
Juan Quintelad24981d2012-05-22 00:42:40 +0200851 if (length == 0)
852 return;
Michael S. Tsirkinc8d6f662014-11-17 17:54:07 +0200853 cpu_physical_memory_clear_dirty_range_type(start, length, client);
Juan Quintelad24981d2012-05-22 00:42:40 +0200854
855 if (tcg_enabled()) {
Juan Quintelaa2f4d5b2013-10-10 11:49:53 +0200856 tlb_reset_dirty_range_all(start, length);
Juan Quintelad24981d2012-05-22 00:42:40 +0200857 }
bellard1ccde1c2004-02-06 19:46:14 +0000858}
859
Juan Quintela981fdf22013-10-10 11:54:09 +0200860static void cpu_physical_memory_set_dirty_tracking(bool enable)
aliguori74576192008-10-06 14:02:03 +0000861{
862 in_migration = enable;
aliguori74576192008-10-06 14:02:03 +0000863}
864
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +0100865/* Called from RCU critical section */
Andreas Färberbb0e6272013-09-03 13:32:01 +0200866hwaddr memory_region_section_get_iotlb(CPUState *cpu,
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200867 MemoryRegionSection *section,
868 target_ulong vaddr,
869 hwaddr paddr, hwaddr xlat,
870 int prot,
871 target_ulong *address)
Blue Swirle5548612012-04-21 13:08:33 +0000872{
Avi Kivitya8170e52012-10-23 12:30:10 +0200873 hwaddr iotlb;
Blue Swirle5548612012-04-21 13:08:33 +0000874 CPUWatchpoint *wp;
875
Blue Swirlcc5bea62012-04-14 14:56:48 +0000876 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +0000877 /* Normal RAM. */
878 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200879 + xlat;
Blue Swirle5548612012-04-21 13:08:33 +0000880 if (!section->readonly) {
Liu Ping Fanb41aac42013-05-29 11:09:17 +0200881 iotlb |= PHYS_SECTION_NOTDIRTY;
Blue Swirle5548612012-04-21 13:08:33 +0000882 } else {
Liu Ping Fanb41aac42013-05-29 11:09:17 +0200883 iotlb |= PHYS_SECTION_ROM;
Blue Swirle5548612012-04-21 13:08:33 +0000884 }
885 } else {
Edgar E. Iglesias1b3fb982013-11-07 18:43:28 +0100886 iotlb = section - section->address_space->dispatch->map.sections;
Paolo Bonzini149f54b2013-05-24 12:59:37 +0200887 iotlb += xlat;
Blue Swirle5548612012-04-21 13:08:33 +0000888 }
889
890 /* Make accesses to pages with watchpoints go via the
891 watchpoint trap routines. */
Andreas Färberff4700b2013-08-26 18:23:18 +0200892 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +0100893 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
Blue Swirle5548612012-04-21 13:08:33 +0000894 /* Avoid trapping reads of pages with a write breakpoint. */
895 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
Liu Ping Fanb41aac42013-05-29 11:09:17 +0200896 iotlb = PHYS_SECTION_WATCH + paddr;
Blue Swirle5548612012-04-21 13:08:33 +0000897 *address |= TLB_MMIO;
898 break;
899 }
900 }
901 }
902
903 return iotlb;
904}
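
/* The returned value serves two purposes: for RAM, the page-aligned ram_addr
 * plus xlat is ORed with PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM, while for
 * everything else it is the section's index in the dispatch map plus the xlat
 * offset.  phys_section_add() below keeps section numbers below
 * TARGET_PAGE_SIZE so the two encodings never collide. */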
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
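
/* mem_add() chops a MemoryRegionSection on target-page boundaries: any
 * partial page at the start or end of the section goes through
 * register_subpage(), while register_multipage() handles the page-aligned
 * middle in one go. */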

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
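
/* On a hugetlbfs mount statfs() reports the huge page size in f_bsize, which
 * is what file_ram_alloc() below aligns the mapping to; on any other
 * filesystem the value is just the block size, hence the warning above. */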
1084
Alex Williamson04b16652010-07-02 11:13:17 -06001085static void *file_ram_alloc(RAMBlock *block,
1086 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001087 const char *path,
1088 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001089{
1090 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001091 char *sanitized_name;
1092 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001093 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001094 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001095 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001096 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001097
Hu Taofc7a5802014-09-09 13:28:01 +08001098 hpagesize = gethugepagesize(path, &local_err);
1099 if (local_err) {
1100 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001101 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001102 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001103 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001104
1105 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001106 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1107 "or larger than huge page size 0x%" PRIx64,
1108 memory, hpagesize);
1109 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001110 }
1111
1112 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001113 error_setg(errp,
1114 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001115 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001116 }
1117
Peter Feiner8ca761f2013-03-04 13:54:25 -05001118 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001119 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001120 for (c = sanitized_name; *c != '\0'; c++) {
1121 if (*c == '/')
1122 *c = '_';
1123 }
1124
1125 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1126 sanitized_name);
1127 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001128
1129 fd = mkstemp(filename);
1130 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001131 error_setg_errno(errp, errno,
1132 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001133 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001134 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001135 }
1136 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001137 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001138
1139 memory = (memory + hpagesize - 1) & ~(hpagesize - 1);
1140
1141 /*
1142 * ftruncate is not supported by hugetlbfs in older
1143 * hosts, so don't bother bailing out on errors.
1144 * If anything goes wrong with it under other filesystems,
1145 * mmap will fail.
1146 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001147 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001148 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001149 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001150
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001151 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1152 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1153 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001154 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001155 error_setg_errno(errp, errno,
1156 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001157 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001158 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001159 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001160
1161 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001162 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001163 }
1164
Alex Williamson04b16652010-07-02 11:13:17 -06001165 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001166 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001167
1168error:
1169 if (mem_prealloc) {
Luiz Capitulinoe4d9df42014-09-08 13:50:05 -04001170 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001171 exit(1);
1172 }
1173 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001174}
1175#endif
1176
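/* Choose an offset for a new RAM block of @size bytes: take the smallest gap
 * after an existing block that is still large enough (best fit). Aborts if no
 * gap can hold the block. */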
Alex Williamsond17b5282010-06-25 11:08:38 -06001177static ram_addr_t find_ram_offset(ram_addr_t size)
1178{
Alex Williamson04b16652010-07-02 11:13:17 -06001179 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001180 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001181
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001182 assert(size != 0); /* it would hand out same offset multiple times */
1183
Paolo Bonzinia3161032012-11-14 15:54:48 +01001184 if (QTAILQ_EMPTY(&ram_list.blocks))
Alex Williamson04b16652010-07-02 11:13:17 -06001185 return 0;
1186
Paolo Bonzinia3161032012-11-14 15:54:48 +01001187 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001188 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001189
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001190 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001191
Paolo Bonzinia3161032012-11-14 15:54:48 +01001192 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001193 if (next_block->offset >= end) {
1194 next = MIN(next, next_block->offset);
1195 }
1196 }
1197 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001198 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001199 mingap = next - end;
1200 }
1201 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001202
1203 if (offset == RAM_ADDR_MAX) {
1204 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1205 (uint64_t)size);
1206 abort();
1207 }
1208
Alex Williamson04b16652010-07-02 11:13:17 -06001209 return offset;
1210}
1211
Juan Quintela652d7ec2012-07-20 10:37:54 +02001212ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001213{
Alex Williamsond17b5282010-06-25 11:08:38 -06001214 RAMBlock *block;
1215 ram_addr_t last = 0;
1216
Paolo Bonzinia3161032012-11-14 15:54:48 +01001217 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001218 last = MAX(last, block->offset + block->max_length);
Alex Williamsond17b5282010-06-25 11:08:38 -06001219
1220 return last;
1221}
1222
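/* Exclude guest RAM from core dumps via MADV_DONTDUMP when the machine
 * option dump-guest-core is turned off. */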
Jason Baronddb97f12012-08-02 15:44:16 -04001223static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1224{
1225 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001226
1227 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001228 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1229 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001230 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1231 if (ret) {
1232 perror("qemu_madvise");
1233 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1234 "but dump_guest_core=off specified\n");
1235 }
1236 }
1237}
1238
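/* Return the RAM block whose offset equals @addr, or NULL if there is none. */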
Hu Tao20cfe882014-04-02 15:13:26 +08001239static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001240{
Hu Tao20cfe882014-04-02 15:13:26 +08001241 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001242
Paolo Bonzinia3161032012-11-14 15:54:48 +01001243 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001244 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001245 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001246 }
1247 }
Hu Tao20cfe882014-04-02 15:13:26 +08001248
1249 return NULL;
1250}
1251
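/* Assign the RAM block at @addr its id string, built from the owning device's
 * qdev path (when there is one) followed by @name, and abort if another block
 * is already registered under the same id. */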
1252void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1253{
1254 RAMBlock *new_block = find_ram_block(addr);
1255 RAMBlock *block;
1256
Avi Kivityc5705a72011-12-20 15:59:12 +02001257 assert(new_block);
1258 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001259
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001260 if (dev) {
1261 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001262 if (id) {
1263 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001264 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001265 }
1266 }
1267 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1268
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001269 /* This assumes the iothread lock is taken here too. */
1270 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001271 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001272 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001273 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1274 new_block->idstr);
1275 abort();
1276 }
1277 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001278 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001279}
1280
Hu Tao20cfe882014-04-02 15:13:26 +08001281void qemu_ram_unset_idstr(ram_addr_t addr)
1282{
1283 RAMBlock *block = find_ram_block(addr);
1284
1285 if (block) {
1286 memset(block->idstr, 0, sizeof(block->idstr));
1287 }
1288}
1289
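/* Mark the range as a candidate for page merging with MADV_MERGEABLE, unless
 * the mem-merge machine option has been disabled. */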
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001290static int memory_try_enable_merging(void *addr, size_t len)
1291{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001292 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001293 /* disabled by the user */
1294 return 0;
1295 }
1296
1297 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1298}
1299
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001300/* Only legal before the guest might have detected the memory size: e.g. on
1301 * incoming migration, or right after reset.
1302 *
1303 * As the memory core doesn't know how memory is accessed, it is up to
1304 * the resize callback to update device state and/or add assertions to detect
1305 * misuse, if necessary.
1306 */
1307int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1308{
1309 RAMBlock *block = find_ram_block(base);
1310
1311 assert(block);
1312
1313 if (block->used_length == newsize) {
1314 return 0;
1315 }
1316
1317 if (!(block->flags & RAM_RESIZEABLE)) {
1318 error_setg_errno(errp, EINVAL,
1319 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1320 " in != 0x" RAM_ADDR_FMT, block->idstr,
1321 newsize, block->used_length);
1322 return -EINVAL;
1323 }
1324
1325 if (block->max_length < newsize) {
1326 error_setg_errno(errp, EINVAL,
1327 "Length too large: %s: 0x" RAM_ADDR_FMT
1328 " > 0x" RAM_ADDR_FMT, block->idstr,
1329 newsize, block->max_length);
1330 return -EINVAL;
1331 }
1332
1333 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1334 block->used_length = newsize;
1335 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1336 memory_region_set_size(block->mr, newsize);
1337 if (block->resized) {
1338 block->resized(block->idstr, newsize, block->host);
1339 }
1340 return 0;
1341}
1342
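/* Link a freshly initialised RAM block into the global list: reserve an
 * offset, allocate the host memory if none was supplied (through Xen or
 * phys_mem_alloc), keep the list sorted from biggest to smallest block, grow
 * the dirty bitmaps if the RAM address space grew, and mark the new range
 * dirty. Returns the block's offset, or -1 with @errp set on failure. */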
Hu Taoef701d72014-09-09 13:27:54 +08001343static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001344{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001345 RAMBlock *block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001346 ram_addr_t old_ram_size, new_ram_size;
1347
1348 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001349
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001350 /* This assumes the iothread lock is taken here too. */
1351 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001352 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001353
1354 if (!new_block->host) {
1355 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001356 xen_ram_alloc(new_block->offset, new_block->max_length,
1357 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001358 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001359 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001360 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001361 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001362 error_setg_errno(errp, errno,
1363 "cannot set up guest memory '%s'",
1364 memory_region_name(new_block->mr));
1365 qemu_mutex_unlock_ramlist();
1366 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001367 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001368 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001369 }
1370 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001371
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001372 /* Keep the list sorted from biggest to smallest block. */
1373 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001374 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001375 break;
1376 }
1377 }
1378 if (block) {
1379 QTAILQ_INSERT_BEFORE(block, new_block, next);
1380 } else {
1381 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1382 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001383 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001384
Umesh Deshpandef798b072011-08-18 11:41:17 -07001385 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001386 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001387
Juan Quintela2152f5c2013-10-08 13:52:02 +02001388 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1389
1390 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001391 int i;
1392 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1393 ram_list.dirty_memory[i] =
1394 bitmap_zero_extend(ram_list.dirty_memory[i],
1395 old_ram_size, new_ram_size);
1396 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001397 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001398 cpu_physical_memory_set_dirty_range(new_block->offset,
1399 new_block->used_length);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001400
Paolo Bonzinia904c912015-01-21 16:18:35 +01001401 if (new_block->host) {
1402 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1403 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1404 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1405 if (kvm_enabled()) {
1406 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1407 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001408 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001409
1410 return new_block->offset;
1411}
1412
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001413#ifdef __linux__
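/* Allocate a RAM block backed by a file created under @mem_path. Rejected
 * under Xen and with accelerators that install their own phys_mem_alloc. */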
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001414ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001415 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001416 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001417{
1418 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001419 ram_addr_t addr;
1420 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001421
1422 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001423 error_setg(errp, "-mem-path not supported with Xen");
1424 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001425 }
1426
1427 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1428 /*
1429 * file_ram_alloc() needs to allocate just like
1430 * phys_mem_alloc, but we haven't bothered to provide
1431 * a hook there.
1432 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001433 error_setg(errp,
1434 "-mem-path not supported with this accelerator");
1435 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001436 }
1437
1438 size = TARGET_PAGE_ALIGN(size);
1439 new_block = g_malloc0(sizeof(*new_block));
1440 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001441 new_block->used_length = size;
1442 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001443 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001444 new_block->host = file_ram_alloc(new_block, size,
1445 mem_path, errp);
1446 if (!new_block->host) {
1447 g_free(new_block);
1448 return -1;
1449 }
1450
Hu Taoef701d72014-09-09 13:27:54 +08001451 addr = ram_block_add(new_block, &local_err);
1452 if (local_err) {
1453 g_free(new_block);
1454 error_propagate(errp, local_err);
1455 return -1;
1456 }
1457 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001458}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001459#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001460
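/* Common allocator behind the qemu_ram_alloc* wrappers: build the RAMBlock
 * (fixed size, caller-supplied host memory, or resizeable up to @max_size)
 * and hand it to ram_block_add(). */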
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001461static
1462ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1463 void (*resized)(const char*,
1464 uint64_t length,
1465 void *host),
1466 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001467 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001468{
1469 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001470 ram_addr_t addr;
1471 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001472
1473 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001474 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001475 new_block = g_malloc0(sizeof(*new_block));
1476 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001477 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001478 new_block->used_length = size;
1479 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001480 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001481 new_block->fd = -1;
1482 new_block->host = host;
1483 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001484 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001485 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001486 if (resizeable) {
1487 new_block->flags |= RAM_RESIZEABLE;
1488 }
Hu Taoef701d72014-09-09 13:27:54 +08001489 addr = ram_block_add(new_block, &local_err);
1490 if (local_err) {
1491 g_free(new_block);
1492 error_propagate(errp, local_err);
1493 return -1;
1494 }
1495 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001496}
1497
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001498ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1499 MemoryRegion *mr, Error **errp)
1500{
1501 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1502}
1503
Hu Taoef701d72014-09-09 13:27:54 +08001504ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001505{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001506 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1507}
1508
1509ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1510 void (*resized)(const char*,
1511 uint64_t length,
1512 void *host),
1513 MemoryRegion *mr, Error **errp)
1514{
1515 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001516}
bellarde9a1ab12007-02-08 23:08:38 +00001517
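/* Drop a RAM block that wraps caller-owned memory: unlink it from the list
 * without unmapping or freeing the underlying host pointer. */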
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001518void qemu_ram_free_from_ptr(ram_addr_t addr)
1519{
1520 RAMBlock *block;
1521
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001522 /* This assumes the iothread lock is taken here too. */
1523 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001524 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001525 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001526 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001527 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001528 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001529 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001530 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001531 }
1532 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001533 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001534}
1535
Anthony Liguoric227f092009-10-01 16:12:16 -05001536void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001537{
Alex Williamson04b16652010-07-02 11:13:17 -06001538 RAMBlock *block;
1539
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001540 /* This assumes the iothread lock is taken here too. */
1541 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001542 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001543 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001544 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001545 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001546 ram_list.version++;
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001547 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001548 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001549 } else if (xen_enabled()) {
1550 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001551#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001552 } else if (block->fd >= 0) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001553 munmap(block->host, block->max_length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001554 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001555#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001556 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001557 qemu_anon_ram_free(block->host, block->max_length);
Alex Williamson04b16652010-07-02 11:13:17 -06001558 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001559 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001560 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001561 }
1562 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001563 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001564
bellarde9a1ab12007-02-08 23:08:38 +00001565}
1566
Huang Yingcd19cfa2011-03-02 08:56:19 +01001567#ifndef _WIN32
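/* Re-create the host mapping for part of a RAM block at the same virtual
 * address: munmap the range and mmap it again from the block's backing fd, or
 * anonymously otherwise, then reapply the merge and dump madvise settings.
 * Does nothing for preallocated (caller-owned) memory and aborts under Xen. */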
1568void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1569{
1570 RAMBlock *block;
1571 ram_addr_t offset;
1572 int flags;
1573 void *area, *vaddr;
1574
Paolo Bonzinia3161032012-11-14 15:54:48 +01001575 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001576 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001577 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001578 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001579 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001580 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001581 } else if (xen_enabled()) {
1582 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001583 } else {
1584 flags = MAP_FIXED;
1585 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001586 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001587 flags |= (block->flags & RAM_SHARED ?
1588 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001589 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1590 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001591 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001592 /*
1593 * Remap needs to match alloc. Accelerators that
1594 * set phys_mem_alloc never remap. If they did,
1595 * we'd need a remap hook here.
1596 */
1597 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1598
Huang Yingcd19cfa2011-03-02 08:56:19 +01001599 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1600 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1601 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001602 }
1603 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001604 fprintf(stderr, "Could not remap addr: "
1605 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001606 length, addr);
1607 exit(1);
1608 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001609 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001610 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001611 }
1612 return;
1613 }
1614 }
1615}
1616#endif /* !_WIN32 */
1617
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001618int qemu_get_ram_fd(ram_addr_t addr)
1619{
1620 RAMBlock *block = qemu_get_ram_block(addr);
1621
1622 return block->fd;
1623}
1624
Damjan Marion3fd74b82014-06-26 23:01:32 +02001625void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1626{
1627 RAMBlock *block = qemu_get_ram_block(addr);
1628
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001629 return ramblock_ptr(block, 0);
Damjan Marion3fd74b82014-06-26 23:01:32 +02001630}
1631
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001632/* Return a host pointer to ram allocated with qemu_ram_alloc.
1633 With the exception of the softmmu code in this file, this should
1634 only be used for local memory (e.g. video ram) that the device owns,
1635 and knows it isn't going to access beyond the end of the block.
1636
1637 It should not be used for general purpose DMA.
1638 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1639 */
1640void *qemu_get_ram_ptr(ram_addr_t addr)
1641{
1642 RAMBlock *block = qemu_get_ram_block(addr);
1643
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001644 if (xen_enabled()) {
1645 /* We need to check if the requested address is in the RAM
1646 * because we don't want to map the entire memory in QEMU.
1647 * In that case just map until the end of the page.
1648 */
1649 if (block->offset == 0) {
1650 return xen_map_cache(addr, 0, 0);
1651 } else if (block->host == NULL) {
1652 block->host =
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001653 xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001654 }
1655 }
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001656 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001657}
1658
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001659/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1660 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001661static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001662{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001663 if (*size == 0) {
1664 return NULL;
1665 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001666 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001667 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001668 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001669 RAMBlock *block;
1670
Paolo Bonzinia3161032012-11-14 15:54:48 +01001671 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001672 if (addr - block->offset < block->max_length) {
1673 if (addr - block->offset + *size > block->max_length)
1674 *size = block->max_length - addr + block->offset;
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001675 return ramblock_ptr(block, addr - block->offset);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001676 }
1677 }
1678
1679 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1680 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001681 }
1682}
1683
Paolo Bonzini7443b432013-06-03 12:44:02 +02001684/* Some of the softmmu routines need to translate from a host pointer
1685 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001686MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001687{
pbrook94a6b542009-04-11 17:15:54 +00001688 RAMBlock *block;
1689 uint8_t *host = ptr;
1690
Jan Kiszka868bb332011-06-21 22:59:09 +02001691 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001692 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001693 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001694 }
1695
Paolo Bonzini23887b72013-05-06 14:28:39 +02001696 block = ram_list.mru_block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001697 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001698 goto found;
1699 }
1700
Paolo Bonzinia3161032012-11-14 15:54:48 +01001701 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001702 /* This case happens when the block is not mapped. */
1703 if (block->host == NULL) {
1704 continue;
1705 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001706 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001707 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001708 }
pbrook94a6b542009-04-11 17:15:54 +00001709 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001710
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001711 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001712
1713found:
1714 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001715 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001716}
Alex Williamsonf471a172010-06-11 11:11:42 -06001717
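/* Write handler used for RAM pages that contain translated code: invalidate
 * any TBs for the written range, perform the store by hand, mark the range
 * dirty, and let the TLB write directly again once no translated code is left
 * in the page. */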
Avi Kivitya8170e52012-10-23 12:30:10 +02001718static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001719 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001720{
Juan Quintela52159192013-10-08 12:44:04 +02001721 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001722 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001723 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001724 switch (size) {
1725 case 1:
1726 stb_p(qemu_get_ram_ptr(ram_addr), val);
1727 break;
1728 case 2:
1729 stw_p(qemu_get_ram_ptr(ram_addr), val);
1730 break;
1731 case 4:
1732 stl_p(qemu_get_ram_ptr(ram_addr), val);
1733 break;
1734 default:
1735 abort();
1736 }
Paolo Bonzini68868672014-07-21 16:45:18 +02001737 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
bellardf23db162005-08-21 19:12:28 +00001738 /* we remove the notdirty callback only if the code has been
1739 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001740 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001741 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001742 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001743 }
bellard1ccde1c2004-02-06 19:46:14 +00001744}
1745
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001746static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1747 unsigned size, bool is_write)
1748{
1749 return is_write;
1750}
1751
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001752static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001753 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001754 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001755 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001756};
1757
pbrook0f459d12008-06-09 00:20:13 +00001758/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell05068c02014-09-12 14:06:48 +01001759static void check_watchpoint(int offset, int len, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001760{
Andreas Färber93afead2013-08-26 03:41:01 +02001761 CPUState *cpu = current_cpu;
1762 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001763 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001764 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001765 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001766 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001767
Andreas Färberff4700b2013-08-26 18:23:18 +02001768 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001769 /* We re-entered the check after replacing the TB. Now raise
1770 * the debug interrupt so that it will trigger after the
1771 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001772 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001773 return;
1774 }
Andreas Färber93afead2013-08-26 03:41:01 +02001775 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001776 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001777 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1778 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001779 if (flags == BP_MEM_READ) {
1780 wp->flags |= BP_WATCHPOINT_HIT_READ;
1781 } else {
1782 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1783 }
1784 wp->hitaddr = vaddr;
Andreas Färberff4700b2013-08-26 18:23:18 +02001785 if (!cpu->watchpoint_hit) {
1786 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001787 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001788 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001789 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001790 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001791 } else {
1792 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001793 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001794 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001795 }
aliguori06d55cc2008-11-18 20:24:06 +00001796 }
aliguori6e140f22008-11-18 20:37:55 +00001797 } else {
1798 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001799 }
1800 }
1801}
1802
pbrook6658ffb2007-03-16 23:58:11 +00001803/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1804 so these check for a hit and then pass through to the normal out-of-line
1805 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001806static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001807 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001808{
Peter Maydell05068c02014-09-12 14:06:48 +01001809 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001810 switch (size) {
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10001811 case 1: return ldub_phys(&address_space_memory, addr);
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10001812 case 2: return lduw_phys(&address_space_memory, addr);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01001813 case 4: return ldl_phys(&address_space_memory, addr);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001814 default: abort();
1815 }
pbrook6658ffb2007-03-16 23:58:11 +00001816}
1817
Avi Kivitya8170e52012-10-23 12:30:10 +02001818static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001819 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001820{
Peter Maydell05068c02014-09-12 14:06:48 +01001821 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001822 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001823 case 1:
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10001824 stb_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001825 break;
1826 case 2:
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10001827 stw_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001828 break;
1829 case 4:
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10001830 stl_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001831 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001832 default: abort();
1833 }
pbrook6658ffb2007-03-16 23:58:11 +00001834}
1835
Avi Kivity1ec9b902012-01-02 12:47:48 +02001836static const MemoryRegionOps watch_mem_ops = {
1837 .read = watch_mem_read,
1838 .write = watch_mem_write,
1839 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001840};
pbrook6658ffb2007-03-16 23:58:11 +00001841
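/* Subpage accessors: a subpage_t splits one target page among several
 * sections, so reads and writes are bounced to the owning address space at
 * subpage->base + addr. */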
Avi Kivitya8170e52012-10-23 12:30:10 +02001842static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001843 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001844{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001845 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001846 uint8_t buf[8];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001847
blueswir1db7b5422007-05-26 17:36:03 +00001848#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001849 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001850 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001851#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001852 address_space_read(subpage->as, addr + subpage->base, buf, len);
1853 switch (len) {
1854 case 1:
1855 return ldub_p(buf);
1856 case 2:
1857 return lduw_p(buf);
1858 case 4:
1859 return ldl_p(buf);
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001860 case 8:
1861 return ldq_p(buf);
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001862 default:
1863 abort();
1864 }
blueswir1db7b5422007-05-26 17:36:03 +00001865}
1866
Avi Kivitya8170e52012-10-23 12:30:10 +02001867static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001868 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001869{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001870 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001871 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001872
blueswir1db7b5422007-05-26 17:36:03 +00001873#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001874 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001875 " value %"PRIx64"\n",
1876 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001877#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001878 switch (len) {
1879 case 1:
1880 stb_p(buf, value);
1881 break;
1882 case 2:
1883 stw_p(buf, value);
1884 break;
1885 case 4:
1886 stl_p(buf, value);
1887 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001888 case 8:
1889 stq_p(buf, value);
1890 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001891 default:
1892 abort();
1893 }
1894 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001895}
1896
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001897static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001898 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001899{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001900 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001901#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001902 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001903 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001904#endif
1905
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001906 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001907 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001908}
1909
Avi Kivity70c68e42012-01-02 12:32:48 +02001910static const MemoryRegionOps subpage_ops = {
1911 .read = subpage_read,
1912 .write = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001913 .impl.min_access_size = 1,
1914 .impl.max_access_size = 8,
1915 .valid.min_access_size = 1,
1916 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001917 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001918 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001919};
1920
Anthony Liguoric227f092009-10-01 16:12:16 -05001921static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001922 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001923{
1924 int idx, eidx;
1925
1926 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1927 return -1;
1928 idx = SUBPAGE_IDX(start);
1929 eidx = SUBPAGE_IDX(end);
1930#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001931 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1932 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001933#endif
blueswir1db7b5422007-05-26 17:36:03 +00001934 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001935 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001936 }
1937
1938 return 0;
1939}
1940
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001941static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001942{
Anthony Liguoric227f092009-10-01 16:12:16 -05001943 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001944
Anthony Liguori7267c092011-08-20 22:09:37 -05001945 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001946
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001947 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001948 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001949 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001950 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001951 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001952#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001953 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1954 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001955#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001956 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001957
1958 return mmio;
1959}
1960
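/* Add a section spanning the whole 64-bit address range for @mr to @map and
 * return its index; mem_begin() uses this for the fixed PHYS_SECTION_*
 * entries. */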
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001961static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1962 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001963{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001964 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02001965 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001966 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02001967 .mr = mr,
1968 .offset_within_address_space = 0,
1969 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001970 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001971 };
1972
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001973 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001974}
1975
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02001976MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001977{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01001978 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
1979 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02001980
1981 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001982}
1983
Avi Kivitye9179ce2009-06-14 11:38:52 +03001984static void io_mem_init(void)
1985{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02001986 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001987 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02001988 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001989 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02001990 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001991 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02001992 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001993}
1994
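/* Start rebuilding the dispatch tree for @as: allocate a fresh
 * AddressSpaceDispatch, seed it with the fixed sections so that
 * PHYS_SECTION_UNASSIGNED/NOTDIRTY/ROM/WATCH keep their well-known indices,
 * and park it in as->next_dispatch until mem_commit() publishes it under RCU. */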
Avi Kivityac1970f2012-10-03 16:22:53 +02001995static void mem_begin(MemoryListener *listener)
1996{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001997 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001998 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1999 uint16_t n;
2000
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002001 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002002 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002003 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002004 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002005 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002006 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002007 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002008 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002009
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002010 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002011 d->as = as;
2012 as->next_dispatch = d;
2013}
2014
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002015static void address_space_dispatch_free(AddressSpaceDispatch *d)
2016{
2017 phys_sections_free(&d->map);
2018 g_free(d);
2019}
2020
Paolo Bonzini00752702013-05-29 12:13:54 +02002021static void mem_commit(MemoryListener *listener)
2022{
2023 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002024 AddressSpaceDispatch *cur = as->dispatch;
2025 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002026
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002027 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002028
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002029 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002030 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002031 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002032 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002033}
2034
Avi Kivity1d711482012-10-02 18:54:45 +02002035static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002036{
Andreas Färber182735e2013-05-29 22:29:20 +02002037 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002038
2039 /* since each CPU stores ram addresses in its TLB cache, we must
2040 reset the modified entries */
2041 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002042 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002043 /* FIXME: Disentangle the cpu.h circular file deps so we can
2044 directly get the right CPU from listener. */
2045 if (cpu->tcg_as_listener != listener) {
2046 continue;
2047 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002048 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002049 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002050}
2051
Avi Kivity93632742012-02-08 16:54:16 +02002052static void core_log_global_start(MemoryListener *listener)
2053{
Juan Quintela981fdf22013-10-10 11:54:09 +02002054 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02002055}
2056
2057static void core_log_global_stop(MemoryListener *listener)
2058{
Juan Quintela981fdf22013-10-10 11:54:09 +02002059 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02002060}
2061
Avi Kivity93632742012-02-08 16:54:16 +02002062static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02002063 .log_global_start = core_log_global_start,
2064 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02002065 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02002066};
2067
Avi Kivityac1970f2012-10-03 16:22:53 +02002068void address_space_init_dispatch(AddressSpace *as)
2069{
Paolo Bonzini00752702013-05-29 12:13:54 +02002070 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002071 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002072 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002073 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002074 .region_add = mem_add,
2075 .region_nop = mem_add,
2076 .priority = 0,
2077 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002078 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002079}
2080
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002081void address_space_unregister(AddressSpace *as)
2082{
2083 memory_listener_unregister(&as->dispatch_listener);
2084}
2085
Avi Kivity83f3c252012-10-07 12:59:55 +02002086void address_space_destroy_dispatch(AddressSpace *as)
2087{
2088 AddressSpaceDispatch *d = as->dispatch;
2089
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002090 atomic_rcu_set(&as->dispatch, NULL);
2091 if (d) {
2092 call_rcu(d, address_space_dispatch_free, rcu);
2093 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002094}
2095
Avi Kivity62152b82011-07-26 14:26:14 +03002096static void memory_map_init(void)
2097{
Anthony Liguori7267c092011-08-20 22:09:37 -05002098 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002099
Paolo Bonzini57271d62013-11-07 17:14:37 +01002100 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002101 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002102
Anthony Liguori7267c092011-08-20 22:09:37 -05002103 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002104 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2105 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002106 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02002107
Avi Kivityf6790af2012-10-02 20:13:51 +02002108 memory_listener_register(&core_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03002109}
2110
2111MemoryRegion *get_system_memory(void)
2112{
2113 return system_memory;
2114}
2115
Avi Kivity309cb472011-08-08 16:09:03 +03002116MemoryRegion *get_system_io(void)
2117{
2118 return system_io;
2119}
2120
pbrooke2eef172008-06-08 01:09:01 +00002121#endif /* !defined(CONFIG_USER_ONLY) */
2122
bellard13eb76e2004-01-24 15:23:36 +00002123/* physical memory access (slow version, mainly for debug) */
2124#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002125int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002126 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002127{
2128 int l, flags;
2129 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002130 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002131
2132 while (len > 0) {
2133 page = addr & TARGET_PAGE_MASK;
2134 l = (page + TARGET_PAGE_SIZE) - addr;
2135 if (l > len)
2136 l = len;
2137 flags = page_get_flags(page);
2138 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002139 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002140 if (is_write) {
2141 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002142 return -1;
bellard579a97f2007-11-11 14:26:47 +00002143 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002144 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002145 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002146 memcpy(p, buf, l);
2147 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002148 } else {
2149 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002150 return -1;
bellard579a97f2007-11-11 14:26:47 +00002151 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002152 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002153 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002154 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002155 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002156 }
2157 len -= l;
2158 buf += l;
2159 addr += l;
2160 }
Paul Brooka68fe892010-03-01 00:08:59 +00002161 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002162}
bellard8df1cd02005-01-28 22:37:22 +00002163
bellard13eb76e2004-01-24 15:23:36 +00002164#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002165
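/* Called after guest RAM has been written directly: if any page in the range
 * was still clean, invalidate translated code for it and mark it dirty;
 * always tell Xen which memory was modified. */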
Avi Kivitya8170e52012-10-23 12:30:10 +02002166static void invalidate_and_set_dirty(hwaddr addr,
2167 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002168{
Peter Maydellf874bf92014-11-16 19:44:21 +00002169 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2170 tb_invalidate_phys_range(addr, addr + length, 0);
Paolo Bonzini68868672014-07-21 16:45:18 +02002171 cpu_physical_memory_set_dirty_range_nocode(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002172 }
Anthony PERARDe2269392012-10-03 13:49:22 +00002173 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002174}
2175
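/* Clamp an access of @l bytes at @addr to what the region can handle: honour
 * .valid.max_access_size (default 4), respect the natural alignment of the
 * address for regions without unaligned support, and round down to a power
 * of two. */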
Richard Henderson23326162013-07-08 14:55:59 -07002176static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002177{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002178 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002179
2180 /* Regions are assumed to support 1-4 byte accesses unless
2181 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002182 if (access_size_max == 0) {
2183 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002184 }
Richard Henderson23326162013-07-08 14:55:59 -07002185
2186 /* Bound the maximum access by the alignment of the address. */
2187 if (!mr->ops->impl.unaligned) {
2188 unsigned align_size_max = addr & -addr;
2189 if (align_size_max != 0 && align_size_max < access_size_max) {
2190 access_size_max = align_size_max;
2191 }
2192 }
2193
2194 /* Don't attempt accesses larger than the maximum. */
2195 if (l > access_size_max) {
2196 l = access_size_max;
2197 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02002198 if (l & (l - 1)) {
2199 l = 1 << (qemu_fls(l) - 1);
2200 }
Richard Henderson23326162013-07-08 14:55:59 -07002201
2202 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002203}
2204
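/* Copy @len bytes between @buf and the physical address space @as, splitting
 * the transfer at memory region boundaries: RAM is accessed with memcpy (plus
 * TB invalidation and dirty tracking on writes), MMIO with 1/2/4/8-byte
 * io_mem accesses. Returns true if any MMIO access reported an error. */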
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002205bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002206 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002207{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002208 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002209 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002210 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002211 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002212 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002213 bool error = false;
ths3b46e622007-09-17 08:09:54 +00002214
bellard13eb76e2004-01-24 15:23:36 +00002215 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002216 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002217 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002218
bellard13eb76e2004-01-24 15:23:36 +00002219 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002220 if (!memory_access_is_direct(mr, is_write)) {
2221 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002222 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002223 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002224 switch (l) {
2225 case 8:
2226 /* 64 bit write access */
2227 val = ldq_p(buf);
2228 error |= io_mem_write(mr, addr1, val, 8);
2229 break;
2230 case 4:
bellard1c213d12005-09-03 10:49:04 +00002231 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002232 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002233 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002234 break;
2235 case 2:
bellard1c213d12005-09-03 10:49:04 +00002236 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002237 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002238 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002239 break;
2240 case 1:
bellard1c213d12005-09-03 10:49:04 +00002241 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002242 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002243 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002244 break;
2245 default:
2246 abort();
bellard13eb76e2004-01-24 15:23:36 +00002247 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002248 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002249 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002250 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002251 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002252 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002253 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002254 }
2255 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002256 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002257 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002258 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002259 switch (l) {
2260 case 8:
2261 /* 64 bit read access */
2262 error |= io_mem_read(mr, addr1, &val, 8);
2263 stq_p(buf, val);
2264 break;
2265 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002266 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002267 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002268 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002269 break;
2270 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002271 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002272 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002273 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002274 break;
2275 case 1:
bellard1c213d12005-09-03 10:49:04 +00002276 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002277 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002278 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002279 break;
2280 default:
2281 abort();
bellard13eb76e2004-01-24 15:23:36 +00002282 }
2283 } else {
2284 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002285 ptr = qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002286 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002287 }
2288 }
2289 len -= l;
2290 buf += l;
2291 addr += l;
2292 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002293
2294 return error;
bellard13eb76e2004-01-24 15:23:36 +00002295}
bellard8df1cd02005-01-28 22:37:22 +00002296
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002297bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002298 const uint8_t *buf, int len)
2299{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002300 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002301}
2302
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002303bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002304{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002305 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002306}
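/*
 * Usage sketch (addresses and data are hypothetical): a device model doing a
 * DMA-style transfer through the convenience wrappers above.  The return
 * value is true if any part of the range hit a region that reported an
 * error.
 *
 *     uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     bool err;
 *
 *     err  = address_space_write(&address_space_memory, 0x1000, buf, sizeof(buf));
 *     err |= address_space_read(&address_space_memory, 0x1000, buf, sizeof(buf));
 *     if (err) {
 *         // at least one access was rejected by an MMIO region
 *     }
 */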
2307
2308
Avi Kivitya8170e52012-10-23 12:30:10 +02002309void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002310 int len, int is_write)
2311{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002312 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002313}
2314
Alexander Graf582b55a2013-12-11 14:17:44 +01002315enum write_rom_type {
2316 WRITE_DATA,
2317 FLUSH_CACHE,
2318};
2319
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002320static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002321 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002322{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002323 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002324 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002325 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002326 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002327
bellardd0ecd2a2006-04-23 17:14:48 +00002328 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002329 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002330 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002331
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002332 if (!(memory_region_is_ram(mr) ||
2333 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002334 /* do nothing */
2335 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002336 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002337 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002338 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002339 switch (type) {
2340 case WRITE_DATA:
2341 memcpy(ptr, buf, l);
2342 invalidate_and_set_dirty(addr1, l);
2343 break;
2344 case FLUSH_CACHE:
2345 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2346 break;
2347 }
bellardd0ecd2a2006-04-23 17:14:48 +00002348 }
2349 len -= l;
2350 buf += l;
2351 addr += l;
2352 }
2353}
2354
Alexander Graf582b55a2013-12-11 14:17:44 +01002355/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002356void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002357 const uint8_t *buf, int len)
2358{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002359 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002360}
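/*
 * Usage sketch (address and payload are hypothetical): firmware and boot
 * loaders go through cpu_physical_memory_write_rom() so that the data also
 * lands in read-only regions, which ordinary address_space_write() calls
 * would not modify.
 *
 *     static const uint8_t reset_vector[4] = { 0x00, 0x00, 0x00, 0xea };
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0x00000000,
 *                                   reset_vector, sizeof(reset_vector));
 */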
2361
2362void cpu_flush_icache_range(hwaddr start, int len)
2363{
2364 /*
2365 * This function should do the same thing as an icache flush that was
2366 * triggered from within the guest. For TCG we are always cache coherent,
2367 * so there is no need to flush anything. For KVM / Xen we need to flush
2368 * the host's instruction cache at least.
2369 */
2370 if (tcg_enabled()) {
2371 return;
2372 }
2373
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002374 cpu_physical_memory_write_rom_internal(&address_space_memory,
2375 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002376}
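/*
 * Usage sketch (address and encoding are hypothetical): after patching guest
 * code from the host side, pair the write with cpu_flush_icache_range() so
 * that KVM/Xen guests do not keep executing stale instructions.
 *
 *     uint8_t insn[4] = { 0x1f, 0x20, 0x03, 0xd5 };   // e.g. an AArch64 NOP
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0x40000000,
 *                                   insn, sizeof(insn));
 *     cpu_flush_icache_range(0x40000000, sizeof(insn));
 */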
2377
aliguori6d16c2f2009-01-22 16:59:11 +00002378typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002379 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002380 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002381 hwaddr addr;
2382 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002383} BounceBuffer;
2384
2385static BounceBuffer bounce;
2386
aliguoriba223c22009-01-22 16:59:16 +00002387typedef struct MapClient {
2388 void *opaque;
2389 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002390 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002391} MapClient;
2392
Blue Swirl72cf2d42009-09-12 07:36:22 +00002393static QLIST_HEAD(map_client_list, MapClient) map_client_list
2394 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002395
2396void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2397{
Anthony Liguori7267c092011-08-20 22:09:37 -05002398 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002399
2400 client->opaque = opaque;
2401 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002402 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002403 return client;
2404}
2405
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002406static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002407{
2408 MapClient *client = (MapClient *)_client;
2409
Blue Swirl72cf2d42009-09-12 07:36:22 +00002410 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002411 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002412}
2413
2414static void cpu_notify_map_clients(void)
2415{
2416 MapClient *client;
2417
Blue Swirl72cf2d42009-09-12 07:36:22 +00002418 while (!QLIST_EMPTY(&map_client_list)) {
2419 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002420 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002421 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002422 }
2423}
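/*
 * Usage sketch (retry_dma, start_dma and MyDMAState are hypothetical names):
 * there is only one global bounce buffer, so mapping an MMIO range can fail
 * while another mapping is outstanding.  Callers register a map client to be
 * called back when address_space_unmap() releases the buffer, then retry.
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDMAState *s = opaque;
 *         start_dma(s);                // try address_space_map() again
 *     }
 *
 *     void *p = address_space_map(as, addr, &len, true);
 *     if (!p) {
 *         cpu_register_map_client(s, retry_dma);
 *         return;
 *     }
 */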
2424
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002425bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2426{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002427 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002428 hwaddr l, xlat;
2429
2430 while (len > 0) {
2431 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002432 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2433 if (!memory_access_is_direct(mr, is_write)) {
2434 l = memory_access_size(mr, l, addr);
2435 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002436 return false;
2437 }
2438 }
2439
2440 len -= l;
2441 addr += l;
2442 }
2443 return true;
2444}
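/*
 * Usage sketch (address, length and buffer are hypothetical): probing a
 * window before transferring, e.g. so a device can signal a bus error to the
 * guest instead of performing a partial write.
 *
 *     uint8_t buf[64] = { 0 };
 *
 *     if (address_space_access_valid(&address_space_memory, 0x2000,
 *                                    sizeof(buf), true)) {
 *         address_space_write(&address_space_memory, 0x2000, buf, sizeof(buf));
 *     } else {
 *         // raise the device's own error condition
 *     }
 */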
2445
aliguori6d16c2f2009-01-22 16:59:11 +00002446/* Map a physical memory region into a host virtual address.
2447 * May map a subset of the requested range, given by and returned in *plen.
2448 * May return NULL if resources needed to perform the mapping are exhausted.
2449 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002450 * Use cpu_register_map_client() to know when retrying the map operation is
2451 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002452 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002453void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002454 hwaddr addr,
2455 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002456 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002457{
Avi Kivitya8170e52012-10-23 12:30:10 +02002458 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002459 hwaddr done = 0;
2460 hwaddr l, xlat, base;
2461 MemoryRegion *mr, *this_mr;
2462 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002463
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002464 if (len == 0) {
2465 return NULL;
2466 }
aliguori6d16c2f2009-01-22 16:59:11 +00002467
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002468 l = len;
2469 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2470 if (!memory_access_is_direct(mr, is_write)) {
2471 if (bounce.buffer) {
2472 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002473 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002474 /* Avoid unbounded allocations */
2475 l = MIN(l, TARGET_PAGE_SIZE);
2476 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002477 bounce.addr = addr;
2478 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002479
2480 memory_region_ref(mr);
2481 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002482 if (!is_write) {
2483 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002484 }
aliguori6d16c2f2009-01-22 16:59:11 +00002485
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002486 *plen = l;
2487 return bounce.buffer;
2488 }
2489
2490 base = xlat;
2491 raddr = memory_region_get_ram_addr(mr);
2492
2493 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002494 len -= l;
2495 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002496 done += l;
2497 if (len == 0) {
2498 break;
2499 }
2500
2501 l = len;
2502 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2503 if (this_mr != mr || xlat != base + done) {
2504 break;
2505 }
aliguori6d16c2f2009-01-22 16:59:11 +00002506 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002507
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002508 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002509 *plen = done;
2510 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002511}
2512
Avi Kivityac1970f2012-10-03 16:22:53 +02002513/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002514 * Will also mark the memory as dirty if is_write == 1. access_len gives
2515 * the amount of memory that was actually read or written by the caller.
2516 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002517void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2518 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002519{
2520 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002521 MemoryRegion *mr;
2522 ram_addr_t addr1;
2523
2524 mr = qemu_ram_addr_from_host(buffer, &addr1);
2525 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002526 if (is_write) {
Paolo Bonzini68868672014-07-21 16:45:18 +02002527 invalidate_and_set_dirty(addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002528 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002529 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002530 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002531 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002532 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002533 return;
2534 }
2535 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002536 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002537 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002538 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002539 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002540 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002541 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002542}
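/*
 * Usage sketch (address space and range are hypothetical): the usual
 * map/access/unmap sequence for zero-copy access to guest memory.  *plen may
 * come back shorter than requested (or point at the bounce buffer), so real
 * callers loop until the whole range has been covered.
 *
 *     hwaddr len = 4096;
 *     void *p = address_space_map(&address_space_memory, 0x3000, &len, false);
 *     if (p) {
 *         // ... read up to 'len' bytes through 'p' ...
 *         address_space_unmap(&address_space_memory, p, len, false, len);
 *     }
 */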
bellardd0ecd2a2006-04-23 17:14:48 +00002543
Avi Kivitya8170e52012-10-23 12:30:10 +02002544void *cpu_physical_memory_map(hwaddr addr,
2545 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002546 int is_write)
2547{
2548 return address_space_map(&address_space_memory, addr, plen, is_write);
2549}
2550
Avi Kivitya8170e52012-10-23 12:30:10 +02002551void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2552 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002553{
2554 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2555}
2556
bellard8df1cd02005-01-28 22:37:22 +00002557/* warning: addr must be aligned */
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002558static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002559 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002560{
bellard8df1cd02005-01-28 22:37:22 +00002561 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002562 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002563 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002564 hwaddr l = 4;
2565 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002566
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002567 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002568 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002569 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002570 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002571#if defined(TARGET_WORDS_BIGENDIAN)
2572 if (endian == DEVICE_LITTLE_ENDIAN) {
2573 val = bswap32(val);
2574 }
2575#else
2576 if (endian == DEVICE_BIG_ENDIAN) {
2577 val = bswap32(val);
2578 }
2579#endif
bellard8df1cd02005-01-28 22:37:22 +00002580 } else {
2581 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002582 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002583 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002584 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002585 switch (endian) {
2586 case DEVICE_LITTLE_ENDIAN:
2587 val = ldl_le_p(ptr);
2588 break;
2589 case DEVICE_BIG_ENDIAN:
2590 val = ldl_be_p(ptr);
2591 break;
2592 default:
2593 val = ldl_p(ptr);
2594 break;
2595 }
bellard8df1cd02005-01-28 22:37:22 +00002596 }
2597 return val;
2598}
2599
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002600uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002601{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002602 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002603}
2604
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002605uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002606{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002607 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002608}
2609
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002610uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002611{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002612 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002613}
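/*
 * Usage sketch (guest address is hypothetical): the three variants above let
 * device code pick a byte order explicitly instead of inheriting the
 * target's.
 *
 *     uint32_t native = ldl_phys(&address_space_memory, 0x4000);
 *     uint32_t le     = ldl_le_phys(&address_space_memory, 0x4000);
 *     uint32_t be     = ldl_be_phys(&address_space_memory, 0x4000);
 *
 * On a little-endian target 'native' matches 'le', on a big-endian target it
 * matches 'be'; descriptor rings and on-wire formats normally use the
 * explicit _le/_be forms.
 */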
2614
bellard84b7b8e2005-11-28 21:19:04 +00002615/* warning: addr must be aligned */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002616static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002617 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002618{
bellard84b7b8e2005-11-28 21:19:04 +00002619 uint8_t *ptr;
2620 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002621 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002622 hwaddr l = 8;
2623 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002624
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002625 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002626 false);
2627 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002628 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002629 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002630#if defined(TARGET_WORDS_BIGENDIAN)
2631 if (endian == DEVICE_LITTLE_ENDIAN) {
2632 val = bswap64(val);
2633 }
2634#else
2635 if (endian == DEVICE_BIG_ENDIAN) {
2636 val = bswap64(val);
2637 }
2638#endif
bellard84b7b8e2005-11-28 21:19:04 +00002639 } else {
2640 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002641 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002642 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002643 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002644 switch (endian) {
2645 case DEVICE_LITTLE_ENDIAN:
2646 val = ldq_le_p(ptr);
2647 break;
2648 case DEVICE_BIG_ENDIAN:
2649 val = ldq_be_p(ptr);
2650 break;
2651 default:
2652 val = ldq_p(ptr);
2653 break;
2654 }
bellard84b7b8e2005-11-28 21:19:04 +00002655 }
2656 return val;
2657}
2658
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002659uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002660{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002661 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002662}
2663
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002664uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002665{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002666 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002667}
2668
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002669uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002670{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002671 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002672}
2673
bellardaab33092005-10-30 20:48:42 +00002674/* XXX: optimize */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002675uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002676{
2677 uint8_t val;
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002678 address_space_rw(as, addr, &val, 1, 0);
bellardaab33092005-10-30 20:48:42 +00002679 return val;
2680}
2681
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002682/* warning: addr must be aligned */
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002683static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002684 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002685{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002686 uint8_t *ptr;
2687 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002688 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002689 hwaddr l = 2;
2690 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002691
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002692 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002693 false);
2694 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002695 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002696 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002697#if defined(TARGET_WORDS_BIGENDIAN)
2698 if (endian == DEVICE_LITTLE_ENDIAN) {
2699 val = bswap16(val);
2700 }
2701#else
2702 if (endian == DEVICE_BIG_ENDIAN) {
2703 val = bswap16(val);
2704 }
2705#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002706 } else {
2707 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002708 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002709 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002710 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002711 switch (endian) {
2712 case DEVICE_LITTLE_ENDIAN:
2713 val = lduw_le_p(ptr);
2714 break;
2715 case DEVICE_BIG_ENDIAN:
2716 val = lduw_be_p(ptr);
2717 break;
2718 default:
2719 val = lduw_p(ptr);
2720 break;
2721 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002722 }
2723 return val;
bellardaab33092005-10-30 20:48:42 +00002724}
2725
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002726uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002727{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002728 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002729}
2730
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002731uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002732{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002733 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002734}
2735
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002736uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002737{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002738 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002739}
2740
bellard8df1cd02005-01-28 22:37:22 +00002741/* warning: addr must be aligned. The ram page is not marked as dirty
2742 and the code inside is not invalidated. It is useful if the dirty
2743 bits are used to track modified PTEs */
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002744void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002745{
bellard8df1cd02005-01-28 22:37:22 +00002746 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002747 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002748 hwaddr l = 4;
2749 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002750
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002751 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002752 true);
2753 if (l < 4 || !memory_access_is_direct(mr, true)) {
2754 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002755 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002756 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002757 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002758 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002759
2760 if (unlikely(in_migration)) {
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002761 if (cpu_physical_memory_is_clean(addr1)) {
aliguori74576192008-10-06 14:02:03 +00002762 /* invalidate code */
2763 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2764 /* set dirty bit */
Paolo Bonzini68868672014-07-21 16:45:18 +02002765 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
aliguori74576192008-10-06 14:02:03 +00002766 }
2767 }
bellard8df1cd02005-01-28 22:37:22 +00002768 }
2769}
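/*
 * Usage sketch (cs, pte_addr and the flag value are hypothetical): MMU
 * emulation updating an accessed/dirty flag inside a guest page table entry
 * uses the _notdirty variant so the store does not, by itself, make the page
 * look modified to the TB-invalidation and dirty-tracking machinery.
 *
 *     uint32_t pte = ldl_phys(cs->as, pte_addr);
 *     pte |= 0x20;                         // illustrative "accessed" bit
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 */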
2770
2771/* warning: addr must be aligned */
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002772static inline void stl_phys_internal(AddressSpace *as,
2773 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002774 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002775{
bellard8df1cd02005-01-28 22:37:22 +00002776 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002777 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002778 hwaddr l = 4;
2779 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002780
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002781 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002782 true);
2783 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002784#if defined(TARGET_WORDS_BIGENDIAN)
2785 if (endian == DEVICE_LITTLE_ENDIAN) {
2786 val = bswap32(val);
2787 }
2788#else
2789 if (endian == DEVICE_BIG_ENDIAN) {
2790 val = bswap32(val);
2791 }
2792#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002793 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002794 } else {
bellard8df1cd02005-01-28 22:37:22 +00002795 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002796 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002797 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002798 switch (endian) {
2799 case DEVICE_LITTLE_ENDIAN:
2800 stl_le_p(ptr, val);
2801 break;
2802 case DEVICE_BIG_ENDIAN:
2803 stl_be_p(ptr, val);
2804 break;
2805 default:
2806 stl_p(ptr, val);
2807 break;
2808 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002809 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002810 }
2811}
2812
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002813void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002814{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002815 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002816}
2817
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002818void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002819{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002820 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002821}
2822
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002823void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002824{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002825 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002826}
2827
bellardaab33092005-10-30 20:48:42 +00002828/* XXX: optimize */
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002829void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002830{
2831 uint8_t v = val;
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002832 address_space_rw(as, addr, &v, 1, 1);
bellardaab33092005-10-30 20:48:42 +00002833}
2834
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002835/* warning: addr must be aligned */
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002836static inline void stw_phys_internal(AddressSpace *as,
2837 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002838 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002839{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002840 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002841 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002842 hwaddr l = 2;
2843 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002844
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002845 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002846 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002847#if defined(TARGET_WORDS_BIGENDIAN)
2848 if (endian == DEVICE_LITTLE_ENDIAN) {
2849 val = bswap16(val);
2850 }
2851#else
2852 if (endian == DEVICE_BIG_ENDIAN) {
2853 val = bswap16(val);
2854 }
2855#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002856 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002857 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002858 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002859 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002860 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002861 switch (endian) {
2862 case DEVICE_LITTLE_ENDIAN:
2863 stw_le_p(ptr, val);
2864 break;
2865 case DEVICE_BIG_ENDIAN:
2866 stw_be_p(ptr, val);
2867 break;
2868 default:
2869 stw_p(ptr, val);
2870 break;
2871 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002872 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002873 }
bellardaab33092005-10-30 20:48:42 +00002874}
2875
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002876void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002877{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002878 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002879}
2880
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002881void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002882{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002883 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002884}
2885
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002886void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002887{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002888 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002889}
2890
bellardaab33092005-10-30 20:48:42 +00002891/* XXX: optimize */
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002892void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002893{
2894 val = tswap64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002895 address_space_rw(as, addr, (void *) &val, 8, 1);
bellardaab33092005-10-30 20:48:42 +00002896}
2897
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002898void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002899{
2900 val = cpu_to_le64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002901 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002902}
2903
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002904void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002905{
2906 val = cpu_to_be64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002907 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002908}
2909
aliguori5e2972f2009-03-28 17:51:36 +00002910/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002911int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002912 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002913{
2914 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002915 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002916 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002917
2918 while (len > 0) {
2919 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002920 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002921 /* if no physical page mapped, return an error */
2922 if (phys_addr == -1)
2923 return -1;
2924 l = (page + TARGET_PAGE_SIZE) - addr;
2925 if (l > len)
2926 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002927 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10002928 if (is_write) {
2929 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2930 } else {
2931 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2932 }
bellard13eb76e2004-01-24 15:23:36 +00002933 len -= l;
2934 buf += l;
2935 addr += l;
2936 }
2937 return 0;
2938}
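/*
 * Usage sketch ('cpu' and the virtual address are hypothetical): debugger
 * code reads guest-virtual memory through cpu_memory_rw_debug(), which
 * translates page by page via cpu_get_phys_page_debug() and is allowed to
 * write into ROM.
 *
 *     uint8_t insn[4];
 *
 *     if (cpu_memory_rw_debug(cpu, 0xffff0000, insn, sizeof(insn), 0) < 0) {
 *         // the page is not currently mapped by the guest MMU
 *     }
 */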
Paul Brooka68fe892010-03-01 00:08:59 +00002939#endif
bellard13eb76e2004-01-24 15:23:36 +00002940
Blue Swirl8e4a4242013-01-06 18:30:17 +00002941/*
2942 * A helper function for the _utterly broken_ virtio device model to find out if
2943 * it's running on a big endian machine. Don't do this at home kids!
2944 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02002945bool target_words_bigendian(void);
2946bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00002947{
2948#if defined(TARGET_WORDS_BIGENDIAN)
2949 return true;
2950#else
2951 return false;
2952#endif
2953}
2954
Wen Congyang76f35532012-05-07 12:04:18 +08002955#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002956bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002957{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002958 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002959 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002960
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002961 mr = address_space_translate(&address_space_memory,
2962 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002963
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002964 return !(memory_region_is_ram(mr) ||
2965 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002966}
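/*
 * Usage sketch ('paddr' and 'buf' are hypothetical): dump-style code can use
 * cpu_physical_memory_is_io() to skip device regions whose contents cannot
 * be copied meaningfully.
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_rw(paddr, buf, TARGET_PAGE_SIZE, 0);
 *     }
 */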
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002967
2968void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2969{
2970 RAMBlock *block;
2971
2972 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02002973 func(block->host, block->offset, block->used_length, opaque);
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002974 }
2975}
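/*
 * Usage sketch (add_block and total_ram are hypothetical helpers): summing
 * guest RAM with the iterator above; the callback signature mirrors the call
 * made in qemu_ram_foreach_block().
 *
 *     static void add_block(void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *     }
 *
 *     static uint64_t total_ram(void)
 *     {
 *         uint64_t total = 0;
 *         qemu_ram_foreach_block(add_block, &total);
 *         return total;
 *     }
 */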
Peter Maydellec3f8c92013-06-27 20:53:38 +01002976#endif