/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

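/* Ensure the node array has room for at least @nodes more entries,
 * growing it geometrically (doubling, with a floor of 16).
 */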
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

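/* Allocate a fresh node and initialize every entry to an empty subtree
 * (skip one level, no target).
 */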
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

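/* Recursively map [*index, *index + *nb) pages to section @leaf.  Runs
 * that are aligned and at least 'step' pages long are recorded directly
 * at this level; anything smaller recurses one level down.
 */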
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

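/* Point @nb pages starting at @index at section number @leaf. */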
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

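/* Compact single-child chains over the whole tree, starting at the root. */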
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

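/* Walk the tree for @addr, honouring the 'skip' shortcuts left by
 * compaction.  Holes and out-of-range addresses resolve to the
 * unassigned section.
 */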
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

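/* Find the section covering @addr, optionally descending into an io
 * subpage to get the per-page subsection that really covers it.
 */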
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

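/* Translate @addr to an offset inside the returned section's memory
 * region (*xlat), clamping *plen so the access does not spill past the
 * end of the region.
 */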
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

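/* Direct (host pointer) access is possible for RAM that is not
 * write-protected, and for ROM devices in ROMD mode when reading.
 */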
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

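/* Translate @addr in @as to a MemoryRegion and offset, walking through
 * any IOMMUs on the way.  A failed IOMMU permission check yields the
 * unassigned region; under Xen, direct-access lengths are additionally
 * clamped to one page.
 */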
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

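/* Variant used when filling the TCG TLB: subpages are not resolved and
 * IOMMUs must not appear on the path (asserted below).
 */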
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        }, {
            /* empty */
        }
    }
};

#endif

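/* Return the CPU with the given index, or NULL if there is none. */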
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

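/* Register a freshly created CPU: assign the next free cpu_index, link
 * it into the global CPU list, and register its state for migration.
 */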
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (i.e. the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* Enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
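/* Find the RAMBlock that contains @addr, trying the most-recently-used
 * block before scanning the whole list; aborts if no block matches.
 */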
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

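/* Reset dirty-tracking TLB state for [start, start + length), which must
 * lie entirely within a single RAMBlock.
 */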
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0) {
        return;
    }
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

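/* Build the iotlb value for a TLB entry: for RAM this is the ram_addr
 * with the notdirty/ROM section index ORed into the low bits, for I/O it
 * is the section index plus offset.  Pages covered by a watchpoint are
 * redirected to the watchpoint section and flagged TLB_MMIO.
 */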
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

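/* Append @section to the map's section table, taking a reference on its
 * memory region, and return the new index.
 */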
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

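/* Register a section smaller than a page: install (or reuse) a subpage
 * container for that page and record the section over the covered range.
 */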
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

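/* Register a page-aligned section: every page in the range points at the
 * same section index.
 */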
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

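/* Memory listener "region_add" hook: split the incoming section into an
 * unaligned head, a run of whole pages, and an unaligned tail, and
 * register each piece as a subpage or multipage mapping as appropriate.
 */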
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

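/* Back @block with a file created in the hugetlbfs mount at @path and
 * mmap it.  On failure, falls through to the error label and, when
 * -mem-prealloc was requested, exits.
 */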
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        /* error_report() already appends a newline, so don't add one here */
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif

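/*
 * Pick a ram_addr_t offset for a new block of @size bytes: scan the
 * gaps between existing blocks and return the start of the tightest
 * gap that still fits (best-fit).  Aborts if no gap is large enough.
 */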
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }

    return last;
}

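/*
 * Honour the "dump-guest-core" machine option: when it is off, mark
 * the guest RAM with MADV_DONTDUMP so that it is left out of core
 * dumps of the QEMU process.
 */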
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block = find_ram_block(addr);
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block = find_ram_block(addr);

    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest might have detected the memory size: e.g.
 * on incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to
 * detect misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}

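/*
 * Link a filled-in @new_block into the RAM list: assign it an offset,
 * allocate host memory if the caller didn't (via Xen or the
 * accelerator's phys_mem_alloc hook), keep the list sorted from
 * biggest to smallest block, grow the dirty memory bitmaps if the
 * address space grew, and apply the usual madvise()/KVM setup to
 * freshly allocated host memory.
 */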
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    /* Keep the list sorted from biggest to smallest block. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

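/*
 * Common worker for the qemu_ram_alloc*() wrappers below: build a
 * RAMBlock with @size bytes in use out of a reservation of @max_size
 * bytes, optionally backed by caller-provided @host memory, and
 * optionally resizeable later via qemu_ram_resize() above.
 */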
static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

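/*
 * Illustrative use of the allocator family above (not a call site in
 * this file): a device that may have to grow its RAM later would do
 * something like
 *
 *     addr = qemu_ram_alloc_resizeable(initial_size, max_size,
 *                                      resized_cb, mr, &err);
 *
 * and then grow or shrink it with qemu_ram_resize(addr, new_size, &err).
 */
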
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->max_length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->max_length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
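/*
 * Throw away the host mapping of a RAM range and map it again at the
 * same virtual address, e.g. to recover from a hardware memory error
 * in the old pages.  The remap mirrors how the block was allocated:
 * file-backed blocks are re-mmap()ed from their fd, anonymous blocks
 * with MAP_ANONYMOUS, both with MAP_FIXED.
 */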
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return ramblock_ptr(block, 0);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->max_length, 1);
        }
    }
    return ramblock_ptr(block, addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length) {
                    *size = block->max_length - addr + block->offset;
                }
                return ramblock_ptr(block, addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

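/*
 * Write handler for the "notdirty" region that backs RAM pages whose
 * dirty bitmap still has clean bits: invalidate any TBs derived from
 * the page if it wasn't yet dirty for code, perform the store, mark
 * the range dirty, and let the TLB switch back to the fast path once
 * nothing clean remains.
 */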
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

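/*
 * Subpage accessors.  A subpage region stands in for a page that is
 * split across several sections, so each access is redirected into
 * the owning address space at (base + addr), bounced through a small
 * buffer to preserve the access width.
 */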
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    case 8:
        return ldq_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}


static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    MemoryRegionSection *sections = cpu->memory_dispatch->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

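/*
 * A memory topology change builds a fresh dispatch tree in the
 * listener's ->begin callback and publishes it in ->commit.  The four
 * dummy sections are registered first and in a fixed order, so the
 * PHYS_SECTION_* constants remain valid indexes into the new map's
 * section table.
 */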
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;

    if (cur) {
        phys_sections_free(&cur->map);
        g_free(cur);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        cpu_reload_memory_map(cpu);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_range_includes_clean(addr, length)) {
        tb_invalidate_phys_range(addr, addr + length, 0);
        cpu_physical_memory_set_dirty_range_nocode(addr, length);
    }
    xen_modified_memory(addr, length);
}

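/*
 * Clamp an access of @l bytes at @addr to something @mr can handle:
 * no wider than the region's declared maximum (default 4 bytes), no
 * wider than the natural alignment of @addr unless the region copes
 * with unaligned accesses, and always a power of two.
 */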
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}

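/*
 * Copy @len bytes between @buf and the guest-visible contents of @as,
 * splitting the transfer at section boundaries.  Direct RAM is
 * memcpy()ed (with dirty tracking on writes); anything else goes
 * through io_mem_read()/io_mem_write() in the widest chunks the
 * target region accepts.  Returns true if any I/O access failed.
 *
 * Typical use, e.g. from a DMA-capable device model:
 *
 *     address_space_rw(&address_space_memory, gpa, data, size, true);
 */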
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}
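
/*
 * Usage sketch (illustrative only; desc_gpa and the buffer are
 * hypothetical): a device model DMA-ing a descriptor out of guest
 * memory can go through address_space_read() and check the error
 * flag, which is true if any part of the transfer hit a failing
 * I/O region:
 *
 *     uint8_t desc[16];
 *     if (address_space_read(&address_space_memory, desc_gpa,
 *                            desc, sizeof(desc))) {
 *         // report a DMA error to the guest
 *     }
 */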


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
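
/*
 * Illustrative sketch (rom_base, blob and blob_size are hypothetical):
 * a firmware loader pulls an image into guest ROM with
 * cpu_physical_memory_write_rom(), which, unlike address_space_rw(),
 * also writes into ROM regions, and then flushes the host icache for
 * accelerators such as KVM:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, rom_base,
 *                                   blob, blob_size);
 *     cpu_flush_icache_range(rom_base, blob_size);
 */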
2359
aliguori6d16c2f2009-01-22 16:59:11 +00002360typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002361 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002362 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002363 hwaddr addr;
2364 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002365} BounceBuffer;
2366
2367static BounceBuffer bounce;
2368
aliguoriba223c22009-01-22 16:59:16 +00002369typedef struct MapClient {
2370 void *opaque;
2371 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002372 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002373} MapClient;
2374
Blue Swirl72cf2d42009-09-12 07:36:22 +00002375static QLIST_HEAD(map_client_list, MapClient) map_client_list
2376 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002377
2378void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2379{
Anthony Liguori7267c092011-08-20 22:09:37 -05002380 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002381
2382 client->opaque = opaque;
2383 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002384 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002385 return client;
2386}
2387
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002388static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002389{
2390 MapClient *client = (MapClient *)_client;
2391
Blue Swirl72cf2d42009-09-12 07:36:22 +00002392 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002393 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002394}
2395
2396static void cpu_notify_map_clients(void)
2397{
2398 MapClient *client;
2399
Blue Swirl72cf2d42009-09-12 07:36:22 +00002400 while (!QLIST_EMPTY(&map_client_list)) {
2401 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002402 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002403 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002404 }
2405}
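
/*
 * Intended pattern (sketch; MyDMAJob, my_dma_continue and job are
 * hypothetical): a caller whose address_space_map() returned NULL
 * because the bounce buffer was busy registers a client and retries
 * from the callback once the buffer is released:
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDMAJob *job = opaque;
 *         my_dma_continue(job);      // calls address_space_map() again
 *     }
 *
 *     cpu_register_map_client(job, retry_dma);
 *
 * The list is drained by cpu_notify_map_clients() when
 * address_space_unmap() frees the bounce buffer.
 */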

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
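
/*
 * Illustrative use (win_base and win_len are hypothetical): a device
 * can probe a guest-supplied DMA window before committing to a
 * transfer, instead of failing halfway through:
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     win_base, win_len, true)) {
 *         // reject the request up front
 *     }
 */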

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
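
/*
 * Zero-copy DMA sketch built on the two wrappers above (gpa, data and
 * size are hypothetical). The map may cover less than requested, so
 * real callers loop until the whole range is transferred:
 *
 *     hwaddr plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 1);
 *     if (host) {
 *         memcpy(host, data, plen);   // device-to-guest copy
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     } else {
 *         // bounce buffer busy: fall back to cpu_physical_memory_rw()
 *         // or register a map client and retry later
 *     }
 */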

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
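
/*
 * Example (illustrative; reg_gpa is hypothetical): a device whose
 * registers are little-endian regardless of the target uses the _le
 * variant, so the 32-bit value is interpreted as little-endian no
 * matter what TARGET_WORDS_BIGENDIAN says:
 *
 *     uint32_t v = ldl_le_phys(&address_space_memory, reg_gpa);
 */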

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
            }
        }
    }
}
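
/*
 * Typical caller (sketch; cs, pte_addr, pte and the flag are
 * hypothetical): a target MMU helper updating accessed/dirty bits in
 * a guest page table entry uses this variant so the page-table walk
 * itself does not dirty the page-table page:
 *
 *     stl_phys_notdirty(cs->as, pte_addr, pte | PG_ACCESSED_MASK);
 */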

/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
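
/*
 * Illustrative use (vaddr, buf and len are whatever the debugger
 * supplies): the gdbstub reads guest virtual memory through this
 * helper, which translates page by page and can write through ROM:
 *
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, len, 0) < 0) {
 *         // report a memory fault back to gdb
 *     }
 */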
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
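
/*
 * Illustrative use (paddr is hypothetical): guest memory dump code can
 * skip device regions while walking physical memory:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;   // nothing sensible to copy from MMIO
 *     }
 */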

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->used_length, opaque);
    }
}
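
/*
 * Sketch: a callback matching the way the iterator above invokes it
 * (count_ram and total are hypothetical) can tally all guest RAM:
 *
 *     static void count_ram(void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram, &total);
 */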
#endif