/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

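/* Backing storage for the radix tree: the array of interior nodes and the
 * array of MemoryRegionSections that the leaves index into.
 */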
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

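/* Make sure the node array can hold at least @nodes more entries, growing it
 * geometrically if necessary.
 */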
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

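/* Recursively map the page range [*index, *index + *nb) to @leaf,
 * allocating intermediate nodes on demand.
 */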
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

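/* Walk the radix tree (honouring skip counts) and return the section that
 * covers @addr, or the unassigned section if nothing does.
 */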
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

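/* Look up the section for @addr, optionally descending into a subpage to
 * pick the exact sub-section.
 */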
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

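/* True if the access can be satisfied directly from host memory rather than
 * going through MMIO dispatch.
 */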
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment. */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUState *cpu = ENV_GET_CPU(env);
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    CPUState *cpu = ENV_GET_CPU(env);

    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
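/* Find the RAMBlock that contains @addr, caching it as the most recently
 * used block.
 */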
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

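/* Reset TLB dirty state for a range that must lie within a single RAMBlock. */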
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

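/* Compute the iotlb value (ram address or section index, plus flags) that is
 * stored in a TLB entry for this mapping.
 */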
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries. Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

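/* Register a section smaller than a page through a subpage container that
 * covers the page holding it.
 */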
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


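/* Register a page-aligned section whose size is a whole number of pages
 * directly in the radix tree.
 */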
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize); i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

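/* Pick the smallest gap in the ram_addr_t space that can hold @size bytes. */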
Alex Williamsond17b5282010-06-25 11:08:38 -06001147static ram_addr_t find_ram_offset(ram_addr_t size)
1148{
Alex Williamson04b16652010-07-02 11:13:17 -06001149 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001150 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001151
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001152    assert(size != 0); /* it would hand out the same offset multiple times */

1153
Paolo Bonzinia3161032012-11-14 15:54:48 +01001154 if (QTAILQ_EMPTY(&ram_list.blocks))
Alex Williamson04b16652010-07-02 11:13:17 -06001155 return 0;
1156
Paolo Bonzinia3161032012-11-14 15:54:48 +01001157 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001158 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001159
1160 end = block->offset + block->length;
1161
Paolo Bonzinia3161032012-11-14 15:54:48 +01001162 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001163 if (next_block->offset >= end) {
1164 next = MIN(next, next_block->offset);
1165 }
1166 }
1167 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001168 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001169 mingap = next - end;
1170 }
1171 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001172
1173 if (offset == RAM_ADDR_MAX) {
1174 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1175 (uint64_t)size);
1176 abort();
1177 }
1178
Alex Williamson04b16652010-07-02 11:13:17 -06001179 return offset;
1180}
1181
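/* Return the end of the block that extends furthest, i.e. one past the
 * highest ram_addr_t currently in use. */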
Juan Quintela652d7ec2012-07-20 10:37:54 +02001182ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001183{
Alex Williamsond17b5282010-06-25 11:08:38 -06001184 RAMBlock *block;
1185 ram_addr_t last = 0;
1186
Paolo Bonzinia3161032012-11-14 15:54:48 +01001187 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001188 last = MAX(last, block->offset + block->length);
1189
1190 return last;
1191}
1192
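/* Honour the dump-guest-core machine option: when it is off, mark guest
 * RAM with MADV_DONTDUMP so it is left out of QEMU core dumps. */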
Jason Baronddb97f12012-08-02 15:44:16 -04001193static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1194{
1195 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001196
1197 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001198 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1199 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001200 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1201 if (ret) {
1202 perror("qemu_madvise");
1203 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1204 "but dump_guest_core=off specified\n");
1205 }
1206 }
1207}
1208
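/* Name the RAM block that starts at @addr: the owning device's qdev path
 * (if any) followed by @name.  Registering two blocks with the same idstr
 * is a fatal error. */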
Avi Kivityc5705a72011-12-20 15:59:12 +02001209void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001210{
1211 RAMBlock *new_block, *block;
1212
Avi Kivityc5705a72011-12-20 15:59:12 +02001213 new_block = NULL;
Paolo Bonzinia3161032012-11-14 15:54:48 +01001214 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001215 if (block->offset == addr) {
1216 new_block = block;
1217 break;
1218 }
1219 }
1220 assert(new_block);
1221 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001222
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001223 if (dev) {
1224 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001225 if (id) {
1226 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001227 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001228 }
1229 }
1230 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1231
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001232 /* This assumes the iothread lock is taken here too. */
1233 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001234 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001235 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001236 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1237 new_block->idstr);
1238 abort();
1239 }
1240 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001241 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001242}
1243
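/* Ask the host to treat the range as mergeable (MADV_MERGEABLE), unless
 * the user disabled the mem-merge machine option. */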
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001244static int memory_try_enable_merging(void *addr, size_t len)
1245{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001246 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001247 /* disabled by the user */
1248 return 0;
1249 }
1250
1251 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1252}
1253
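/* Allocate and register a new RAM block of @size bytes.  If @host is
 * non-NULL the caller-provided memory backs the block as-is; otherwise the
 * backing comes from -mem-path, Xen, or the accelerator's phys_mem_alloc
 * hook.  Returns the block's offset in the ram_addr_t space. */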
Avi Kivityc5705a72011-12-20 15:59:12 +02001254ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1255 MemoryRegion *mr)
1256{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001257 RAMBlock *block, *new_block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001258 ram_addr_t old_ram_size, new_ram_size;
1259
1260 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001261
1262 size = TARGET_PAGE_ALIGN(size);
1263 new_block = g_malloc0(sizeof(*new_block));
Markus Armbruster3435f392013-07-31 15:11:07 +02001264 new_block->fd = -1;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001265
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001266 /* This assumes the iothread lock is taken here too. */
1267 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001268 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001269 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001270 if (host) {
1271 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001272 new_block->flags |= RAM_PREALLOC_MASK;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001273 } else if (xen_enabled()) {
1274 if (mem_path) {
1275 fprintf(stderr, "-mem-path not supported with Xen\n");
1276 exit(1);
1277 }
1278 xen_ram_alloc(new_block->offset, size, mr);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001279 } else {
1280 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001281 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1282 /*
1283 * file_ram_alloc() needs to allocate just like
1284 * phys_mem_alloc, but we haven't bothered to provide
1285 * a hook there.
1286 */
1287 fprintf(stderr,
1288 "-mem-path not supported with this accelerator\n");
1289 exit(1);
1290 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001291 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001292 }
1293 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001294 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001295 if (!new_block->host) {
1296 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1297 new_block->mr->name, strerror(errno));
1298 exit(1);
1299 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001300 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001301 }
1302 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001303 new_block->length = size;
1304
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001305 /* Keep the list sorted from biggest to smallest block. */
1306 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1307 if (block->length < new_block->length) {
1308 break;
1309 }
1310 }
1311 if (block) {
1312 QTAILQ_INSERT_BEFORE(block, new_block, next);
1313 } else {
1314 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1315 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001316 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001317
Umesh Deshpandef798b072011-08-18 11:41:17 -07001318 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001319 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001320
Juan Quintela2152f5c2013-10-08 13:52:02 +02001321 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1322
1323 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001324 int i;
1325 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1326 ram_list.dirty_memory[i] =
1327 bitmap_zero_extend(ram_list.dirty_memory[i],
1328 old_ram_size, new_ram_size);
1329 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001330 }
Juan Quintela75218e72013-10-08 12:31:54 +02001331 cpu_physical_memory_set_dirty_range(new_block->offset, size);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001332
Jason Baronddb97f12012-08-02 15:44:16 -04001333 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001334 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001335 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001336
Cam Macdonell84b89d72010-07-26 18:10:57 -06001337 if (kvm_enabled())
1338 kvm_setup_guest_memory(new_block->host, size);
1339
1340 return new_block->offset;
1341}
1342
Avi Kivityc5705a72011-12-20 15:59:12 +02001343ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001344{
Avi Kivityc5705a72011-12-20 15:59:12 +02001345 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001346}
bellarde9a1ab12007-02-08 23:08:38 +00001347
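/* Unregister the RAM block at @addr without freeing its backing memory;
 * the host pointer was supplied by the caller of qemu_ram_alloc_from_ptr(). */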
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001348void qemu_ram_free_from_ptr(ram_addr_t addr)
1349{
1350 RAMBlock *block;
1351
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001352 /* This assumes the iothread lock is taken here too. */
1353 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001354 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001355 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001356 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001357 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001358 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001359 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001360 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001361 }
1362 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001363 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001364}
1365
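/* Unregister the RAM block at @addr and release its backing memory using
 * whichever unmap/free path matches how the block was allocated. */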
Anthony Liguoric227f092009-10-01 16:12:16 -05001366void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001367{
Alex Williamson04b16652010-07-02 11:13:17 -06001368 RAMBlock *block;
1369
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001370 /* This assumes the iothread lock is taken here too. */
1371 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001372 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001373 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001374 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001375 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001376 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001377 if (block->flags & RAM_PREALLOC_MASK) {
1378 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001379 } else if (xen_enabled()) {
1380 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001381#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001382 } else if (block->fd >= 0) {
1383 munmap(block->host, block->length);
1384 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001385#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001386 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001387 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001388 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001389 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001390 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001391 }
1392 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001393 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001394
bellarde9a1ab12007-02-08 23:08:38 +00001395}
1396
Huang Yingcd19cfa2011-03-02 08:56:19 +01001397#ifndef _WIN32
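/* Replace the host mapping behind [addr, addr + length) with a fresh one
 * that mirrors the block's original allocation (file-backed or anonymous),
 * then restore the merging and dump settings on the new pages. */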
1398void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1399{
1400 RAMBlock *block;
1401 ram_addr_t offset;
1402 int flags;
1403 void *area, *vaddr;
1404
Paolo Bonzinia3161032012-11-14 15:54:48 +01001405 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001406 offset = addr - block->offset;
1407 if (offset < block->length) {
1408 vaddr = block->host + offset;
1409 if (block->flags & RAM_PREALLOC_MASK) {
1410 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001411 } else if (xen_enabled()) {
1412 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001413 } else {
1414 flags = MAP_FIXED;
1415 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001416 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001417#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001418 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1419 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001420#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001421 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001422#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001423 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1424 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001425 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001426 /*
1427 * Remap needs to match alloc. Accelerators that
1428 * set phys_mem_alloc never remap. If they did,
1429 * we'd need a remap hook here.
1430 */
1431 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1432
Huang Yingcd19cfa2011-03-02 08:56:19 +01001433 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1434 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1435 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001436 }
1437 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001438 fprintf(stderr, "Could not remap addr: "
1439 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001440 length, addr);
1441 exit(1);
1442 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001443 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001444 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001445 }
1446 return;
1447 }
1448 }
1449}
1450#endif /* !_WIN32 */
1451
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001452/* Return a host pointer to ram allocated with qemu_ram_alloc.
1453 With the exception of the softmmu code in this file, this should
1454 only be used for local memory (e.g. video ram) that the device owns,
1455 and knows it isn't going to access beyond the end of the block.
1456
1457 It should not be used for general purpose DMA.
1458 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1459 */
1460void *qemu_get_ram_ptr(ram_addr_t addr)
1461{
1462 RAMBlock *block = qemu_get_ram_block(addr);
1463
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001464 if (xen_enabled()) {
1465        /* We need to check if the requested address is in RAM
1466 * because we don't want to map the entire memory in QEMU.
1467 * In that case just map until the end of the page.
1468 */
1469 if (block->offset == 0) {
1470 return xen_map_cache(addr, 0, 0);
1471 } else if (block->host == NULL) {
1472 block->host =
1473 xen_map_cache(block->offset, block->length, 1);
1474 }
1475 }
1476 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001477}
1478
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001479/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1480 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001481static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001482{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001483 if (*size == 0) {
1484 return NULL;
1485 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001486 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001487 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001488 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001489 RAMBlock *block;
1490
Paolo Bonzinia3161032012-11-14 15:54:48 +01001491 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001492 if (addr - block->offset < block->length) {
1493 if (addr - block->offset + *size > block->length)
1494 *size = block->length - addr + block->offset;
1495 return block->host + (addr - block->offset);
1496 }
1497 }
1498
1499 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1500 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001501 }
1502}
1503
Paolo Bonzini7443b432013-06-03 12:44:02 +02001504/* Some of the softmmu routines need to translate from a host pointer
1505 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001506MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001507{
pbrook94a6b542009-04-11 17:15:54 +00001508 RAMBlock *block;
1509 uint8_t *host = ptr;
1510
Jan Kiszka868bb332011-06-21 22:59:09 +02001511 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001512 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001513 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001514 }
1515
Paolo Bonzini23887b72013-05-06 14:28:39 +02001516 block = ram_list.mru_block;
1517 if (block && block->host && host - block->host < block->length) {
1518 goto found;
1519 }
1520
Paolo Bonzinia3161032012-11-14 15:54:48 +01001521 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001522        /* This case appears when the block is not mapped. */
1523 if (block->host == NULL) {
1524 continue;
1525 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001526 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001527 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001528 }
pbrook94a6b542009-04-11 17:15:54 +00001529 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001530
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001531 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001532
1533found:
1534 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001535 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001536}
Alex Williamsonf471a172010-06-11 11:11:42 -06001537
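/* Write handler for pages whose code-dirty bit is still clear: invalidate
 * any TBs generated from the page, perform the store, set the dirty bits,
 * and switch the TLB back to direct RAM writes once the page is dirty. */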
Avi Kivitya8170e52012-10-23 12:30:10 +02001538static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001539 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001540{
Juan Quintela52159192013-10-08 12:44:04 +02001541 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001542 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001543 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001544 switch (size) {
1545 case 1:
1546 stb_p(qemu_get_ram_ptr(ram_addr), val);
1547 break;
1548 case 2:
1549 stw_p(qemu_get_ram_ptr(ram_addr), val);
1550 break;
1551 case 4:
1552 stl_p(qemu_get_ram_ptr(ram_addr), val);
1553 break;
1554 default:
1555 abort();
1556 }
Juan Quintela52159192013-10-08 12:44:04 +02001557 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1558 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
bellardf23db162005-08-21 19:12:28 +00001559 /* we remove the notdirty callback only if the code has been
1560 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001561 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001562 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001563 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001564 }
bellard1ccde1c2004-02-06 19:46:14 +00001565}
1566
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001567static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1568 unsigned size, bool is_write)
1569{
1570 return is_write;
1571}
1572
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001573static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001574 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001575 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001576 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001577};
1578
pbrook0f459d12008-06-09 00:20:13 +00001579/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001580static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001581{
Andreas Färber93afead2013-08-26 03:41:01 +02001582 CPUState *cpu = current_cpu;
1583 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001584 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001585 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001586 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001587 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001588
Andreas Färberff4700b2013-08-26 18:23:18 +02001589 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001590 /* We re-entered the check after replacing the TB. Now raise
1591         * the debug interrupt so that it will trigger after the
1592 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001593 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001594 return;
1595 }
Andreas Färber93afead2013-08-26 03:41:01 +02001596 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001597 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001598 if ((vaddr == (wp->vaddr & len_mask) ||
1599 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001600 wp->flags |= BP_WATCHPOINT_HIT;
Andreas Färberff4700b2013-08-26 18:23:18 +02001601 if (!cpu->watchpoint_hit) {
1602 cpu->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001603 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001604 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001605 cpu->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001606 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001607 } else {
1608 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1609 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001610 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001611 }
aliguori06d55cc2008-11-18 20:24:06 +00001612 }
aliguori6e140f22008-11-18 20:37:55 +00001613 } else {
1614 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001615 }
1616 }
1617}
1618
pbrook6658ffb2007-03-16 23:58:11 +00001619/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1620 so these check for a hit then pass through to the normal out-of-line
1621 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001622static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001623 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001624{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001625 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1626 switch (size) {
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10001627 case 1: return ldub_phys(&address_space_memory, addr);
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10001628 case 2: return lduw_phys(&address_space_memory, addr);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01001629 case 4: return ldl_phys(&address_space_memory, addr);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001630 default: abort();
1631 }
pbrook6658ffb2007-03-16 23:58:11 +00001632}
1633
Avi Kivitya8170e52012-10-23 12:30:10 +02001634static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001635 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001636{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001637 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1638 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001639 case 1:
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10001640 stb_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001641 break;
1642 case 2:
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10001643 stw_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001644 break;
1645 case 4:
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10001646 stl_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001647 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001648 default: abort();
1649 }
pbrook6658ffb2007-03-16 23:58:11 +00001650}
1651
Avi Kivity1ec9b902012-01-02 12:47:48 +02001652static const MemoryRegionOps watch_mem_ops = {
1653 .read = watch_mem_read,
1654 .write = watch_mem_write,
1655 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001656};
pbrook6658ffb2007-03-16 23:58:11 +00001657
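/* Subpage access routines.  A subpage region covers a page that is split
 * across several memory region sections, so reads and writes are forwarded
 * through address_space_read/write at base + offset. */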
Avi Kivitya8170e52012-10-23 12:30:10 +02001658static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001659 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001660{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001661 subpage_t *subpage = opaque;
1662 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001663
blueswir1db7b5422007-05-26 17:36:03 +00001664#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001665 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001666 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001667#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001668 address_space_read(subpage->as, addr + subpage->base, buf, len);
1669 switch (len) {
1670 case 1:
1671 return ldub_p(buf);
1672 case 2:
1673 return lduw_p(buf);
1674 case 4:
1675 return ldl_p(buf);
1676 default:
1677 abort();
1678 }
blueswir1db7b5422007-05-26 17:36:03 +00001679}
1680
Avi Kivitya8170e52012-10-23 12:30:10 +02001681static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001682 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001683{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001684 subpage_t *subpage = opaque;
1685 uint8_t buf[4];
1686
blueswir1db7b5422007-05-26 17:36:03 +00001687#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001688 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001689 " value %"PRIx64"\n",
1690 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001691#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001692 switch (len) {
1693 case 1:
1694 stb_p(buf, value);
1695 break;
1696 case 2:
1697 stw_p(buf, value);
1698 break;
1699 case 4:
1700 stl_p(buf, value);
1701 break;
1702 default:
1703 abort();
1704 }
1705 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001706}
1707
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001708static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001709 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001710{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001711 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001712#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001713 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001714 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001715#endif
1716
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001717 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001718 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001719}
1720
Avi Kivity70c68e42012-01-02 12:32:48 +02001721static const MemoryRegionOps subpage_ops = {
1722 .read = subpage_read,
1723 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001724 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001725 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001726};
1727
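/* Route the subpage offsets [start, end] of @mmio to @section. */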
Anthony Liguoric227f092009-10-01 16:12:16 -05001728static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001729 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001730{
1731 int idx, eidx;
1732
1733 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1734 return -1;
1735 idx = SUBPAGE_IDX(start);
1736 eidx = SUBPAGE_IDX(end);
1737#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001738 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1739 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001740#endif
blueswir1db7b5422007-05-26 17:36:03 +00001741 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001742 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001743 }
1744
1745 return 0;
1746}
1747
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001748static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001749{
Anthony Liguoric227f092009-10-01 16:12:16 -05001750 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001751
Anthony Liguori7267c092011-08-20 22:09:37 -05001752 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001753
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001754 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001755 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001756 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001757 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001758 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001759#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001760 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1761 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001762#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001763 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001764
1765 return mmio;
1766}
1767
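/* Register a section spanning the whole address space for @mr; used for
 * the fixed PHYS_SECTION_* entries installed by mem_begin(). */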
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001768static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001769{
1770 MemoryRegionSection section = {
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001771 .address_space = &address_space_memory,
Avi Kivity5312bd82012-02-12 18:32:55 +02001772 .mr = mr,
1773 .offset_within_address_space = 0,
1774 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001775 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001776 };
1777
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001778 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001779}
1780
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001781MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001782{
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001783 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001784}
1785
Avi Kivitye9179ce2009-06-14 11:38:52 +03001786static void io_mem_init(void)
1787{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001788 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1789 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001790 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001791 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001792 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001793 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001794 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001795}
1796
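/* Memory listener callbacks that rebuild the per-AddressSpace dispatch
 * table: mem_begin() starts a fresh AddressSpaceDispatch seeded with the
 * fixed dummy sections, and mem_commit() swaps it in and frees the old
 * table once the topology update is complete. */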
Avi Kivityac1970f2012-10-03 16:22:53 +02001797static void mem_begin(MemoryListener *listener)
1798{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001799 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001800 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1801 uint16_t n;
1802
1803 n = dummy_section(&d->map, &io_mem_unassigned);
1804 assert(n == PHYS_SECTION_UNASSIGNED);
1805 n = dummy_section(&d->map, &io_mem_notdirty);
1806 assert(n == PHYS_SECTION_NOTDIRTY);
1807 n = dummy_section(&d->map, &io_mem_rom);
1808 assert(n == PHYS_SECTION_ROM);
1809 n = dummy_section(&d->map, &io_mem_watch);
1810 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001811
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001812 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001813 d->as = as;
1814 as->next_dispatch = d;
1815}
1816
1817static void mem_commit(MemoryListener *listener)
1818{
1819 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001820 AddressSpaceDispatch *cur = as->dispatch;
1821 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001822
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001823 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001824
Paolo Bonzini0475d942013-05-29 12:28:21 +02001825 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001826
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001827 if (cur) {
1828 phys_sections_free(&cur->map);
1829 g_free(cur);
1830 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001831}
1832
Avi Kivity1d711482012-10-02 18:54:45 +02001833static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001834{
Andreas Färber182735e2013-05-29 22:29:20 +02001835 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001836
1837 /* since each CPU stores ram addresses in its TLB cache, we must
1838 reset the modified entries */
1839 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001840 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001841 CPUArchState *env = cpu->env_ptr;
1842
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01001843        /* FIXME: Disentangle the cpu.h circular file deps so we can
1844           directly get the right CPU from the listener. */
1845 if (cpu->tcg_as_listener != listener) {
1846 continue;
1847 }
Avi Kivity117712c2012-02-12 21:23:17 +02001848 tlb_flush(env, 1);
1849 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001850}
1851
Avi Kivity93632742012-02-08 16:54:16 +02001852static void core_log_global_start(MemoryListener *listener)
1853{
Juan Quintela981fdf22013-10-10 11:54:09 +02001854 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001855}
1856
1857static void core_log_global_stop(MemoryListener *listener)
1858{
Juan Quintela981fdf22013-10-10 11:54:09 +02001859 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001860}
1861
Avi Kivity93632742012-02-08 16:54:16 +02001862static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001863 .log_global_start = core_log_global_start,
1864 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001865 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001866};
1867
Avi Kivityac1970f2012-10-03 16:22:53 +02001868void address_space_init_dispatch(AddressSpace *as)
1869{
Paolo Bonzini00752702013-05-29 12:13:54 +02001870 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001871 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001872 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001873 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001874 .region_add = mem_add,
1875 .region_nop = mem_add,
1876 .priority = 0,
1877 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001878 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001879}
1880
Avi Kivity83f3c252012-10-07 12:59:55 +02001881void address_space_destroy_dispatch(AddressSpace *as)
1882{
1883 AddressSpaceDispatch *d = as->dispatch;
1884
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001885 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001886 g_free(d);
1887 as->dispatch = NULL;
1888}
1889
Avi Kivity62152b82011-07-26 14:26:14 +03001890static void memory_map_init(void)
1891{
Anthony Liguori7267c092011-08-20 22:09:37 -05001892 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001893
Paolo Bonzini57271d62013-11-07 17:14:37 +01001894 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001895 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001896
Anthony Liguori7267c092011-08-20 22:09:37 -05001897 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001898 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1899 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001900 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001901
Avi Kivityf6790af2012-10-02 20:13:51 +02001902 memory_listener_register(&core_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03001903}
1904
1905MemoryRegion *get_system_memory(void)
1906{
1907 return system_memory;
1908}
1909
Avi Kivity309cb472011-08-08 16:09:03 +03001910MemoryRegion *get_system_io(void)
1911{
1912 return system_io;
1913}
1914
pbrooke2eef172008-06-08 01:09:01 +00001915#endif /* !defined(CONFIG_USER_ONLY) */
1916
bellard13eb76e2004-01-24 15:23:36 +00001917/* physical memory access (slow version, mainly for debug) */
1918#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001919int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001920 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001921{
1922 int l, flags;
1923 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001924 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001925
1926 while (len > 0) {
1927 page = addr & TARGET_PAGE_MASK;
1928 l = (page + TARGET_PAGE_SIZE) - addr;
1929 if (l > len)
1930 l = len;
1931 flags = page_get_flags(page);
1932 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001933 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001934 if (is_write) {
1935 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001936 return -1;
bellard579a97f2007-11-11 14:26:47 +00001937 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001938 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001939 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001940 memcpy(p, buf, l);
1941 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001942 } else {
1943 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001944 return -1;
bellard579a97f2007-11-11 14:26:47 +00001945 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001946 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001947 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001948 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001949 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001950 }
1951 len -= l;
1952 buf += l;
1953 addr += l;
1954 }
Paul Brooka68fe892010-03-01 00:08:59 +00001955 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001956}
bellard8df1cd02005-01-28 22:37:22 +00001957
bellard13eb76e2004-01-24 15:23:36 +00001958#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001959
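/* After a direct write to guest RAM: invalidate any translated code in the
 * range and mark it dirty for VGA emulation and migration tracking. */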
Avi Kivitya8170e52012-10-23 12:30:10 +02001960static void invalidate_and_set_dirty(hwaddr addr,
1961 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001962{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001963 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001964 /* invalidate code */
1965 tb_invalidate_phys_page_range(addr, addr + length, 0);
1966 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02001967 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1968 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001969 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001970 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001971}
1972
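/* Clamp an @l byte access at @addr to what the region supports: bounded by
 * the region's maximum access size, by the alignment of the address unless
 * unaligned accesses are allowed, and rounded down to a power of two. */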
Richard Henderson23326162013-07-08 14:55:59 -07001973static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001974{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001975 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001976
1977 /* Regions are assumed to support 1-4 byte accesses unless
1978 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001979 if (access_size_max == 0) {
1980 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001981 }
Richard Henderson23326162013-07-08 14:55:59 -07001982
1983 /* Bound the maximum access by the alignment of the address. */
1984 if (!mr->ops->impl.unaligned) {
1985 unsigned align_size_max = addr & -addr;
1986 if (align_size_max != 0 && align_size_max < access_size_max) {
1987 access_size_max = align_size_max;
1988 }
1989 }
1990
1991 /* Don't attempt accesses larger than the maximum. */
1992 if (l > access_size_max) {
1993 l = access_size_max;
1994 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001995 if (l & (l - 1)) {
1996 l = 1 << (qemu_fls(l) - 1);
1997 }
Richard Henderson23326162013-07-08 14:55:59 -07001998
1999 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002000}
2001
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002002bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002003 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002004{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002005 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002006 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002007 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002008 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002009 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002010 bool error = false;
ths3b46e622007-09-17 08:09:54 +00002011
bellard13eb76e2004-01-24 15:23:36 +00002012 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002013 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002014 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002015
bellard13eb76e2004-01-24 15:23:36 +00002016 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002017 if (!memory_access_is_direct(mr, is_write)) {
2018 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002019 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002020 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002021 switch (l) {
2022 case 8:
2023 /* 64 bit write access */
2024 val = ldq_p(buf);
2025 error |= io_mem_write(mr, addr1, val, 8);
2026 break;
2027 case 4:
bellard1c213d12005-09-03 10:49:04 +00002028 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002029 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002030 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002031 break;
2032 case 2:
bellard1c213d12005-09-03 10:49:04 +00002033 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002034 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002035 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002036 break;
2037 case 1:
bellard1c213d12005-09-03 10:49:04 +00002038 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002039 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002040 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002041 break;
2042 default:
2043 abort();
bellard13eb76e2004-01-24 15:23:36 +00002044 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002045 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002046 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002047 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002048 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002049 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002050 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002051 }
2052 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002053 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002054 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002055 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002056 switch (l) {
2057 case 8:
2058 /* 64 bit read access */
2059 error |= io_mem_read(mr, addr1, &val, 8);
2060 stq_p(buf, val);
2061 break;
2062 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002063 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002064 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002065 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002066 break;
2067 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002068 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002069 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002070 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002071 break;
2072 case 1:
bellard1c213d12005-09-03 10:49:04 +00002073 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002074 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002075 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002076 break;
2077 default:
2078 abort();
bellard13eb76e2004-01-24 15:23:36 +00002079 }
2080 } else {
2081 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002082 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002083 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002084 }
2085 }
2086 len -= l;
2087 buf += l;
2088 addr += l;
2089 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002090
2091 return error;
bellard13eb76e2004-01-24 15:23:36 +00002092}
bellard8df1cd02005-01-28 22:37:22 +00002093
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002094bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002095 const uint8_t *buf, int len)
2096{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002097 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002098}
2099
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002100bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002101{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002102 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002103}
2104
2105
Avi Kivitya8170e52012-10-23 12:30:10 +02002106void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002107 int len, int is_write)
2108{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002109 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002110}
2111
Alexander Graf582b55a2013-12-11 14:17:44 +01002112enum write_rom_type {
2113 WRITE_DATA,
2114 FLUSH_CACHE,
2115};
2116
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002117static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002118 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002119{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002120 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002121 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002122 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002123 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002124
bellardd0ecd2a2006-04-23 17:14:48 +00002125 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002126 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002127 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002128
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002129 if (!(memory_region_is_ram(mr) ||
2130 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002131 /* do nothing */
2132 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002133 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002134 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002135 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002136 switch (type) {
2137 case WRITE_DATA:
2138 memcpy(ptr, buf, l);
2139 invalidate_and_set_dirty(addr1, l);
2140 break;
2141 case FLUSH_CACHE:
2142 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2143 break;
2144 }
bellardd0ecd2a2006-04-23 17:14:48 +00002145 }
2146 len -= l;
2147 buf += l;
2148 addr += l;
2149 }
2150}
2151
Alexander Graf582b55a2013-12-11 14:17:44 +01002152/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002153void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002154 const uint8_t *buf, int len)
2155{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002156 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002157}
2158
2159void cpu_flush_icache_range(hwaddr start, int len)
2160{
2161 /*
2162 * This function should do the same thing as an icache flush that was
2163 * triggered from within the guest. For TCG we are always cache coherent,
2164 * so there is no need to flush anything. For KVM / Xen we need to flush
2165 * the host's instruction cache at least.
2166 */
2167 if (tcg_enabled()) {
2168 return;
2169 }
2170
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002171 cpu_physical_memory_write_rom_internal(&address_space_memory,
2172 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002173}
2174
aliguori6d16c2f2009-01-22 16:59:11 +00002175typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002176 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002177 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002178 hwaddr addr;
2179 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002180} BounceBuffer;
2181
2182static BounceBuffer bounce;
2183
aliguoriba223c22009-01-22 16:59:16 +00002184typedef struct MapClient {
2185 void *opaque;
2186 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002187 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002188} MapClient;
2189
Blue Swirl72cf2d42009-09-12 07:36:22 +00002190static QLIST_HEAD(map_client_list, MapClient) map_client_list
2191 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002192
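/* Register a callback to run when the bounce buffer used by
 * address_space_map() becomes free again, so a caller whose map attempt
 * returned NULL knows when a retry is likely to succeed. */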
2193void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2194{
Anthony Liguori7267c092011-08-20 22:09:37 -05002195 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002196
2197 client->opaque = opaque;
2198 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002199 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002200 return client;
2201}
2202
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002203static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002204{
2205 MapClient *client = (MapClient *)_client;
2206
Blue Swirl72cf2d42009-09-12 07:36:22 +00002207 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002208 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002209}
2210
2211static void cpu_notify_map_clients(void)
2212{
2213 MapClient *client;
2214
Blue Swirl72cf2d42009-09-12 07:36:22 +00002215 while (!QLIST_EMPTY(&map_client_list)) {
2216 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002217 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002218 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002219 }
2220}
2221
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002222bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2223{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002224 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002225 hwaddr l, xlat;
2226
2227 while (len > 0) {
2228 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002229 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2230 if (!memory_access_is_direct(mr, is_write)) {
2231 l = memory_access_size(mr, l, addr);
2232 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002233 return false;
2234 }
2235 }
2236
2237 len -= l;
2238 addr += l;
2239 }
2240 return true;
2241}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len) {
                    l = access_len;
                }
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
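
/* Editor's sketch (not part of the original file): the usual
 * map/modify/unmap pattern built on the two functions above.  *plen can
 * come back smaller than requested (e.g. when the bounce buffer is in
 * use), so a real caller must loop or fall back; this sketch just gives
 * up and reports nothing written.  All names are hypothetical. */
#if 0
static bool example_fill_guest_buffer(AddressSpace *as, hwaddr addr,
                                      const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        return false;   /* retry later via cpu_register_map_client() */
    }
    if (plen < size) {
        /* partial mapping: unmap with access_len == 0, nothing dirtied */
        address_space_unmap(as, host, plen, 1, 0);
        return false;
    }
    memcpy(host, data, size);
    address_space_unmap(as, host, plen, 1, size);
    return true;
}
#endif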

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
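
/* Editor's sketch (not part of the original file): a device whose
 * in-memory descriptors are defined as little-endian would read them
 * with the _le_ accessor, which gives the right result on any host and
 * target byte order.  The descriptor layout and offset are made up. */
#if 0
static uint32_t example_read_desc_flags(AddressSpace *as, hwaddr desc_base)
{
    /* 32-bit flags word at offset 8 of the hypothetical descriptor */
    return ldl_le_phys(as, desc_base + 8);
}
#endif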

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked dirty
   and the code inside it is not invalidated. This is useful when the
   dirty bits are used to track modified PTEs */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
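
/* Editor's sketch (not part of the original file): the kind of caller
 * the comment above has in mind -- a software MMU walker setting an
 * "accessed" bit in a guest PTE without invalidating translated code
 * that happens to live in the same page.  The PTE layout is made up. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_mark_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(as, pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}
#endif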

/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
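
/* Editor's sketch (not part of the original file): the store-side
 * counterpart of the earlier load example -- a read-modify-write of a
 * little-endian status word in guest memory.  Names are hypothetical. */
#if 0
static void example_set_status_bit(AddressSpace *as, hwaddr status_addr,
                                   uint32_t bit)
{
    uint32_t status = ldl_le_phys(as, status_addr);

    stl_le_phys(as, status_addr, status | bit);
}
#endif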

/* XXX: optimize */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
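
/* Editor's sketch (not part of the original file): this is the sort of
 * call the gdb stub makes to peek at guest *virtual* memory; a negative
 * return means some page in the range had no physical mapping.  The
 * helper name is hypothetical; ldl_p() interprets the bytes in target
 * byte order. */
#if 0
static bool example_peek_guest_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;
    }
    *out = ldl_p(buf);
    return true;
}
#endif
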
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
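
/* Editor's sketch (not part of the original file): memory-dump style
 * code can use this predicate to avoid reading device registers as if
 * they were RAM.  The helper name is hypothetical. */
#if 0
static void example_dump_phys_page(hwaddr page, uint8_t *buf)
{
    if (cpu_physical_memory_is_io(page)) {
        memset(buf, 0, TARGET_PAGE_SIZE);   /* don't touch MMIO */
    } else {
        address_space_rw(&address_space_memory, page, buf,
                         TARGET_PAGE_SIZE, 0);
    }
}
#endif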

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
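
/* Editor's sketch (not part of the original file): a callback matching
 * the RAMBlockIterFunc shape implied by the loop above (assumed here to
 * return int, as its migration users do), simply summing guest RAM. */
#if 0
static int example_sum_ram(void *host_addr, ram_addr_t offset,
                           ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
    return 0;
}

/* usage: ram_addr_t total = 0;
 *        qemu_ram_foreach_block(example_sum_ram, &total); */
#endif
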
#endif