/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* The current CPU in the current thread.  It is only valid inside
   cpu_exec().  */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels to skip to reach the next entry (each level covers
     * P_L2_BITS address bits); 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
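
/* Worked example: with 4 KiB target pages (TARGET_PAGE_BITS == 12),
 * P_L2_LEVELS is ((64 - 12 - 1) / 9) + 1 == 6, so six radix levels of
 * P_L2_SIZE == 512 entries each are enough to map every page of the
 * 64-bit physical address space. */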

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry: detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
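
/* Illustration: a map containing a single 4 KiB page would otherwise need
 * a chain of single-child intermediate nodes down to the leaf; after
 * compaction the parent entries' skip fields jump over those levels, so
 * phys_page_find() below only walks nodes that actually discriminate
 * between sections. */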

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
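
/* Each loop iteration above consumes lp.skip levels (lp.skip * P_L2_BITS
 * bits of the page index, most significant first); a leaf has skip == 0
 * and ends the walk, and a NIL pointer at any point means the range was
 * never mapped and resolves to PHYS_SECTION_UNASSIGNED. */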

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
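
/* A sketch of the caller side (modeled on address_space_rw(); the local
 * names are illustrative only):
 *
 *     hwaddr xlat, l = len;
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l,
 *                                                is_write);
 *     if (memory_access_is_direct(mr, is_write)) {
 *         memcpy(qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat),
 *                buf, l);
 *     }
 *
 * *plen is shrunk to the longest run that stays inside one MemoryRegion,
 * so callers loop, advancing addr and buf by the returned length, until
 * the whole buffer has been transferred. */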

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
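
/* The sanity check above accepts, say, a 4-byte watchpoint at a 4-byte
 * aligned address (len_mask == ~3 and (addr & 3) == 0) and rejects
 * len == 3 or a 4-byte watchpoint at 0x1002: the len_mask comparison
 * used when a watchpoint hits can only express aligned power-of-2
 * ranges. */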

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* Enable or disable single-step mode.  EXCP_DEBUG is returned by the
   CPU loop after each instruction.  */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
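
/* Resulting encodings: for RAM the iotlb value is the page-aligned guest
 * RAM address plus the in-page offset, with PHYS_SECTION_NOTDIRTY or
 * PHYS_SECTION_ROM ORed into the low bits; for MMIO it is the section's
 * index in the dispatch map plus the offset; a watchpoint on the page
 * overrides both with PHYS_SECTION_WATCH + paddr and forces the slow
 * (TLB_MMIO) path. */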
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
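
/* Example split with 4 KiB pages: a section covering 0x1800..0x51ff is
 * registered as a subpage for the unaligned head (0x1800..0x1fff), a
 * multipage run for the aligned middle (0x2000..0x4fff), and a second
 * subpage for the short tail (0x5000..0x51ff). */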

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);
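    /* e.g. with 2 MiB huge pages, a 5 MiB request is rounded up to 6 MiB
     * here, so that ftruncate() and mmap() below always see a whole
     * number of huge pages. */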

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize); i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
1176
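/*
 * Worked example (illustrative only; the offsets are made up): with
 * blocks at [0, 0x8000000) and [0xc000000, 0x10000000), a call to
 * find_ram_offset(0x2000000) looks, for each block's end, for the
 * nearest following block.  The gap after the first block is
 * 0x4000000 bytes; it is big enough and is the smallest fitting gap,
 * so the new block is placed at offset 0x8000000.
 */
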
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

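/*
 * Illustrative sketch (not part of this file): a caller names a block
 * right after allocating it.  The device pointer, the "vga.vram" name
 * and the device path are hypothetical; the idstr is simply the qdev
 * path, a '/', then the name, e.g. "pci.0/00:02.0/vga.vram".
 *
 *     ram_addr_t offset = qemu_ram_alloc(8 * 1024 * 1024, mr);
 *     qemu_ram_set_idstr(offset, "vga.vram", DEVICE(dev));
 */
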
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, size);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

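/*
 * Illustrative sketch (not part of this file): the two allocation
 * entry points above.  qemu_ram_alloc() has QEMU allocate the host
 * memory itself; qemu_ram_alloc_from_ptr() wraps host memory the
 * caller already owns (the RAM_PREALLOC_MASK case).  The variables
 * below are hypothetical.
 *
 *     ram_addr_t off1 = qemu_ram_alloc(ram_size, mr);
 *     ram_addr_t off2 = qemu_ram_alloc_from_ptr(buf_size, existing_buf, mr2);
 */
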
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

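/*
 * Illustrative sketch (not part of this file): the intended use of
 * qemu_get_ram_ptr() per the comment above is a device touching RAM it
 * owns, e.g. a display device clearing its own video RAM.
 * "vram_offset" and "vram_size" are hypothetical.
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     memset(vram, 0, vram_size);   // stays inside the block it owns
 *
 * For guest-visible DMA, cpu_physical_memory_map()/..._rw() below are
 * the right interfaces instead.
 */
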
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case occurs when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

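/*
 * Worked example (illustrative only, assuming the usual byte-granular
 * sub_section[] table): registering a hypothetical 0x100-byte region
 * at offset 0x200 of a 4 KiB page gives start = 0x200 and end = 0x2ff,
 * so SUBPAGE_IDX() yields idx = 0x200 and eidx = 0x2ff, and those 256
 * entries are pointed at the new section index.
 */
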
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .address_space = &address_space_memory,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
{
    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    as->dispatch = next;

    if (cur) {
        phys_sections_free(&cur->map);
        g_free(cur);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        /* FIXME: Disentangle the cpu.h circular file deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_is_clean(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
        cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    }
    xen_modified_memory(addr, length);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}

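/*
 * Worked example (illustrative only): for a region with
 * valid.max_access_size == 0 (so it defaults to 4) and no unaligned
 * support, an 8-byte access at addr 0x1006 is first capped to 4, then
 * the alignment term (addr & -addr) == 2 caps it to 2; 2 is already a
 * power of two, so l == 2 and the caller splits the transfer into
 * smaller pieces.
 */
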
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

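/*
 * Illustrative sketch (not part of this file): a device model filling
 * a guest buffer through the slow path.  "guest_pa" and the reply
 * contents are hypothetical.
 *
 *     uint8_t reply[64] = { 0 };
 *     cpu_physical_memory_rw(guest_pa, reply, sizeof(reply), 1);
 *
 * The same transfer against an explicit address space would be
 * address_space_write(&address_space_memory, guest_pa, reply,
 * sizeof(reply)).
 */
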
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

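/*
 * Illustrative sketch (not part of this file): a loader patching guest
 * code and then making the change visible to the host instruction
 * cache (a no-op under TCG, as explained above).  "entry" and
 * "trampoline" are hypothetical.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, entry,
 *                                   trampoline, sizeof(trampoline));
 *     cpu_flush_icache_range(entry, sizeof(trampoline));
 */
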
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

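/*
 * Illustrative sketch (not part of this file): how a DMA user might
 * use the map-client list.  If address_space_map() below returns NULL
 * because the bounce buffer is busy, the caller registers a callback
 * and retries from there once an unmap frees the resources.  The
 * continue_dma() callback and "s" are hypothetical.
 *
 *     void *p = address_space_map(as, addr, &len, is_write);
 *     if (!p) {
 *         cpu_register_map_client(s, continue_dma);
 *         return;  // continue_dma(s) will retry the mapping later
 *     }
 */
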
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

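/*
 * Illustrative sketch (not part of this file): probing a transfer
 * before committing to it, e.g. to fail a request cleanly instead of
 * half-writing it.  "as", "addr", "buf" and "len" are hypothetical.
 *
 *     if (!address_space_access_valid(as, addr, len, true)) {
 *         return -1;  // report the bad address to the caller
 *     }
 *     address_space_write(as, addr, buf, len);
 */
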
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02002304/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002305 * Will also mark the memory as dirty if is_write == 1. access_len gives
2306 * the amount of memory that was actually read or written by the caller.
2307 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002308void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2309 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002310{
2311 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002312 MemoryRegion *mr;
2313 ram_addr_t addr1;
2314
2315 mr = qemu_ram_addr_from_host(buffer, &addr1);
2316 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002317 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002318 while (access_len) {
2319 unsigned l;
2320 l = TARGET_PAGE_SIZE;
2321 if (l > access_len)
2322 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002323 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002324 addr1 += l;
2325 access_len -= l;
2326 }
2327 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002328 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002329 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002330 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002331 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002332 return;
2333 }
2334 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002335 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002336 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002337 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002338 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002339 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002340 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002341}
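
/* Usage sketch (hypothetical): the canonical map/modify/unmap pattern.
 * address_space_map() may shorten the mapping (e.g. when it falls back to
 * the single bounce buffer), so callers loop on the returned *plen.  The
 * function name fill_guest_buffer_example is illustrative only. */
static void fill_guest_buffer_example(AddressSpace *as, hwaddr addr,
                                      hwaddr len, uint8_t pattern)
{
    while (len > 0) {
        hwaddr l = len;
        void *p = address_space_map(as, addr, &l, true);

        if (!p) {
            /* Bounce buffer busy; a real caller would use
             * cpu_register_map_client() and retry later. */
            return;
        }
        memset(p, pattern, l);
        /* Passing access_len == l marks the written range dirty. */
        address_space_unmap(as, p, l, true, l);
        addr += l;
        len -= l;
    }
}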

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
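
/* Usage sketch (hypothetical): the fixed-endian accessors are for data
 * whose byte order is defined by a device ABI rather than by the target
 * CPU.  Here a 32-bit length field of a little-endian descriptor is
 * loaded; the field offset and helper name are illustrative only. */
static uint32_t read_desc_len_example(AddressSpace *as, hwaddr desc)
{
    /* ldl_le_phys() returns the value in host byte order regardless
     * of the target's endianness. */
    return ldl_le_phys(as, desc + 8);
}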

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}
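
/* Usage sketch (hypothetical): a software page-table walker for a 64-bit
 * target reads each entry with the target's natural byte order via
 * ldq_phys().  The 8-byte entry size and helper name are illustrative. */
static uint64_t read_pte64_example(AddressSpace *as, hwaddr table,
                                   unsigned index)
{
    return ldq_phys(as, table + ((hwaddr)index << 3));
}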

/* XXX: optimize */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    uint8_t val;
    address_space_rw(as, addr, &val, 1, 0);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The RAM page is not marked as dirty,
   and the code inside it is not invalidated.  This is useful when the
   dirty bits are used to track modified PTEs. */
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flag(addr1,
                                                   DIRTY_MEMORY_MIGRATION);
                cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
            }
        }
    }
}
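
/* Usage sketch (hypothetical): a target MMU helper setting the "accessed"
 * bit in a guest PTE.  stl_phys_notdirty() avoids the code-invalidation
 * and dirty marking an ordinary store would trigger, which matters when
 * the dirty bits are themselves used to detect modified PTEs (migration
 * is still handled, as above).  PTE_A_EXAMPLE is an illustrative flag. */
enum { PTE_A_EXAMPLE = 0x20 };

static void pte_set_accessed_example(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    if (!(pte & PTE_A_EXAMPLE)) {
        stl_phys_notdirty(as, pte_addr, pte | PTE_A_EXAMPLE);
    }
}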

/* warning: addr must be aligned */
static inline void stl_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}
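
/* Usage sketch (hypothetical): writing a doorbell register that a device
 * defines as little-endian; stl_le_phys() does the right thing whatever
 * the target's endianness.  The register offset is illustrative only. */
static void ring_doorbell_example(AddressSpace *as, hwaddr mmio_base,
                                  uint32_t queue)
{
    stl_le_phys(as, mmio_base + 0x40, queue);
}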

/* XXX: optimize */
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    address_space_rw(as, addr, &v, 1, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(AddressSpace *as,
                                     hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    address_space_rw(as, addr, (void *) &val, 8, 1);
}
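
/* Usage sketch (hypothetical): publishing a 64-bit counter that the guest
 * reads as big-endian (e.g. a network-order field).  Note the stq helpers
 * convert the value to the requested byte order up front and then write
 * it out through address_space_rw().  The helper name is illustrative. */
static void publish_seq_example(AddressSpace *as, hwaddr seq_addr,
                                uint64_t seq)
{
    stq_be_phys(as, seq_addr, seq);
}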

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
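
/* Usage sketch (hypothetical): a gdbstub-style peek at a guest virtual
 * address.  The debug accessor translates page by page, so a buffer may
 * span multiple mappings.  The helper name is illustrative only. */
static uint32_t debug_peek_u32_example(CPUState *cpu, target_ulong vaddr)
{
    uint32_t val = 0;

    if (cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)&val, sizeof(val), 0)) {
        return 0; /* unmapped; a real caller would report the failure */
    }
    return val;
}
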
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
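
/* Usage sketch (hypothetical): guest-memory dumpers use a check like this
 * to skip device-backed pages, since reading MMIO can have side effects.
 * The page walker below is illustrative only. */
static void walk_ram_pages_example(hwaddr start, hwaddr end,
                                   void (*visit)(hwaddr))
{
    hwaddr addr;

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (!cpu_physical_memory_is_io(addr)) {
            visit(addr);
        }
    }
}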

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
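
/* Usage sketch (hypothetical): totalling guest RAM with a RAMBlockIterFunc
 * callback, the same pattern the RDMA migration code uses to register each
 * block with the NIC.  Helper names are illustrative only. */
static void sum_block_example(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t total_ram_example(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(sum_block_example, &total);
    return total;
}
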
#endif