/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

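/* Make sure the node array can hold at least @nodes more entries, growing
 * the allocation geometrically (doubling, with a minimum of 16). */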
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

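/* Allocate a new node and initialise every entry to "skip one level,
 * nothing allocated below yet" (PHYS_MAP_NODE_NIL).  Returns the node index. */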
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

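/* Recursively populate the radix tree at the given level: map the page range
 * described by *index/*nb to the section number @leaf, writing entries
 * directly where the range is aligned to this level's step and descending
 * one level otherwise.  Intermediate nodes are allocated on demand. */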
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

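/* Map @nb pages starting at @index to the section number @leaf in the
 * dispatch's physical page table. */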
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

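/* Walk the radix tree for @addr and return the covering MemoryRegionSection,
 * or the unassigned section if no mapping covers that byte. */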
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

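/* Look up the section covering @addr and, if requested, descend into a
 * subpage so the returned section is the one that really maps the address. */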
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
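/* Return the RAMBlock containing @addr, caching it as the most recently
 * used block; aborts if the offset is not inside any registered block. */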
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

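/* Clear the dirty state in the CPU TLBs for the host memory backing the
 * guest range [start, start + length); both ends must lie in the same
 * RAMBlock. */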
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

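/* MemoryListener add callback: carve @section into a partial leading page,
 * page-aligned whole-page runs and a partial trailing page, registering the
 * partial pieces through subpages and the aligned middle as full pages in the
 * address space's next dispatch table. */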
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

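/* Return the huge page size of the hugetlbfs mount containing @path (its
 * statfs block size), or 0 on error.  Warns if @path is not on hugetlbfs. */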
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

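/* Back @block with a file on the hugetlbfs mount at @path: create and unlink
 * a temporary file, map it, and, when preallocation is requested, touch every
 * huge page up front, using a SIGBUS handler to detect allocation failure. */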
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory / hpagesize) - 1; i++) {
            memset(area + (hpagesize * i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

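/* Find the smallest gap between existing RAM blocks that can hold @size
 * bytes and return its start offset; aborts if no gap is large enough. */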
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
1231 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001232 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1233 /*
1234 * file_ram_alloc() needs to allocate just like
1235 * phys_mem_alloc, but we haven't bothered to provide
1236 * a hook there.
1237 */
1238 fprintf(stderr,
1239 "-mem-path not supported with this accelerator\n");
1240 exit(1);
1241 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001242 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001243 }
1244 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001245 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001246 if (!new_block->host) {
1247 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1248 new_block->mr->name, strerror(errno));
1249 exit(1);
1250 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001251 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001252 }
1253 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001254 new_block->length = size;
1255
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001256 /* Keep the list sorted from biggest to smallest block. */
1257 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1258 if (block->length < new_block->length) {
1259 break;
1260 }
1261 }
1262 if (block) {
1263 QTAILQ_INSERT_BEFORE(block, new_block, next);
1264 } else {
1265 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1266 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001267 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001268
Umesh Deshpandef798b072011-08-18 11:41:17 -07001269 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001270 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001271
Juan Quintela2152f5c2013-10-08 13:52:02 +02001272 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1273
1274 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001275 int i;
1276 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1277 ram_list.dirty_memory[i] =
1278 bitmap_zero_extend(ram_list.dirty_memory[i],
1279 old_ram_size, new_ram_size);
1280 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001281 }
Juan Quintela75218e72013-10-08 12:31:54 +02001282 cpu_physical_memory_set_dirty_range(new_block->offset, size);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001283
Jason Baronddb97f12012-08-02 15:44:16 -04001284 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001285 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001286 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001287
Cam Macdonell84b89d72010-07-26 18:10:57 -06001288 if (kvm_enabled())
1289 kvm_setup_guest_memory(new_block->host, size);
1290
1291 return new_block->offset;
1292}
1293
Avi Kivityc5705a72011-12-20 15:59:12 +02001294ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001295{
Avi Kivityc5705a72011-12-20 15:59:12 +02001296 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001297}
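
/*
 * Illustrative sketch (not part of the build): how a caller might obtain a
 * guest RAM block through the allocator above.  The MemoryRegion is assumed
 * to have been initialized by the memory API; the size is hypothetical.
 */
#if 0
static ram_addr_t example_alloc_ram(MemoryRegion *mr)  /* mr set up elsewhere */
{
    return qemu_ram_alloc(64 * 1024, mr);               /* one 64 KiB RAM block */
}
#endif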
bellarde9a1ab12007-02-08 23:08:38 +00001298
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001299void qemu_ram_free_from_ptr(ram_addr_t addr)
1300{
1301 RAMBlock *block;
1302
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001303 /* This assumes the iothread lock is taken here too. */
1304 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001305 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001306 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001307 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001308 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001309 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001310 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001311 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001312 }
1313 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001314 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001315}
1316
Anthony Liguoric227f092009-10-01 16:12:16 -05001317void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001318{
Alex Williamson04b16652010-07-02 11:13:17 -06001319 RAMBlock *block;
1320
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001321 /* This assumes the iothread lock is taken here too. */
1322 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001323 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001324 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001325 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001326 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001327 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001328 if (block->flags & RAM_PREALLOC_MASK) {
1329 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001330 } else if (xen_enabled()) {
1331 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001332#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001333 } else if (block->fd >= 0) {
1334 munmap(block->host, block->length);
1335 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001336#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001337 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001338 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001339 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001340 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001341 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001342 }
1343 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001344 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001345
bellarde9a1ab12007-02-08 23:08:38 +00001346}
1347
Huang Yingcd19cfa2011-03-02 08:56:19 +01001348#ifndef _WIN32
1349void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1350{
1351 RAMBlock *block;
1352 ram_addr_t offset;
1353 int flags;
1354 void *area, *vaddr;
1355
Paolo Bonzinia3161032012-11-14 15:54:48 +01001356 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001357 offset = addr - block->offset;
1358 if (offset < block->length) {
1359 vaddr = block->host + offset;
1360 if (block->flags & RAM_PREALLOC_MASK) {
1361 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001362 } else if (xen_enabled()) {
1363 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001364 } else {
1365 flags = MAP_FIXED;
1366 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001367 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001368#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001369 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1370 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001371#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001372 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001373#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001374 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1375 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001376 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001377 /*
1378 * Remap needs to match alloc. Accelerators that
1379 * set phys_mem_alloc never remap. If they did,
1380 * we'd need a remap hook here.
1381 */
1382 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1383
Huang Yingcd19cfa2011-03-02 08:56:19 +01001384 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1385 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1386 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001387 }
1388 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001389 fprintf(stderr, "Could not remap addr: "
1390 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001391 length, addr);
1392 exit(1);
1393 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001394 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001395 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001396 }
1397 return;
1398 }
1399 }
1400}
1401#endif /* !_WIN32 */
1402
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001403/* Return a host pointer to ram allocated with qemu_ram_alloc.
1404 With the exception of the softmmu code in this file, this should
1405 only be used for local memory (e.g. video ram) that the device owns,
1406 and knows it isn't going to access beyond the end of the block.
1407
1408 It should not be used for general purpose DMA.
1409 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1410 */
1411void *qemu_get_ram_ptr(ram_addr_t addr)
1412{
1413 RAMBlock *block = qemu_get_ram_block(addr);
1414
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001415 if (xen_enabled()) {
1416 /* We need to check if the requested address is in the RAM
1417 * because we don't want to map the entire memory in QEMU.
1418 * In that case just map until the end of the page.
1419 */
1420 if (block->offset == 0) {
1421 return xen_map_cache(addr, 0, 0);
1422 } else if (block->host == NULL) {
1423 block->host =
1424 xen_map_cache(block->offset, block->length, 1);
1425 }
1426 }
1427 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001428}
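
/*
 * Illustrative sketch (hypothetical, not built): per the comment above, a
 * device that owns a RAM block may touch it directly through the returned
 * host pointer, as long as it stays within the block.
 */
#if 0
static void example_clear_vram(ram_addr_t vram_base, ram_addr_t vram_size)
{
    uint8_t *p = qemu_get_ram_ptr(vram_base);   /* host pointer into the block */
    memset(p, 0, vram_size);                    /* never past the block's end */
}
#endif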
1429
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001430/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1431 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001432static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001433{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001434 if (*size == 0) {
1435 return NULL;
1436 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001437 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001438 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001439 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001440 RAMBlock *block;
1441
Paolo Bonzinia3161032012-11-14 15:54:48 +01001442 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001443 if (addr - block->offset < block->length) {
1444 if (addr - block->offset + *size > block->length)
1445 *size = block->length - addr + block->offset;
1446 return block->host + (addr - block->offset);
1447 }
1448 }
1449
1450 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1451 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001452 }
1453}
1454
Paolo Bonzini7443b432013-06-03 12:44:02 +02001455/* Some of the softmmu routines need to translate from a host pointer
1456 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001457MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001458{
pbrook94a6b542009-04-11 17:15:54 +00001459 RAMBlock *block;
1460 uint8_t *host = ptr;
1461
Jan Kiszka868bb332011-06-21 22:59:09 +02001462 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001463 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001464 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001465 }
1466
Paolo Bonzini23887b72013-05-06 14:28:39 +02001467 block = ram_list.mru_block;
1468 if (block && block->host && host - block->host < block->length) {
1469 goto found;
1470 }
1471
Paolo Bonzinia3161032012-11-14 15:54:48 +01001472 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001473        /* This case appears when the block is not mapped. */
1474 if (block->host == NULL) {
1475 continue;
1476 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001477 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001478 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001479 }
pbrook94a6b542009-04-11 17:15:54 +00001480 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001481
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001482 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001483
1484found:
1485 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001486 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001487}
Alex Williamsonf471a172010-06-11 11:11:42 -06001488
Avi Kivitya8170e52012-10-23 12:30:10 +02001489static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001490 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001491{
Juan Quintela52159192013-10-08 12:44:04 +02001492 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001493 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001494 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001495 switch (size) {
1496 case 1:
1497 stb_p(qemu_get_ram_ptr(ram_addr), val);
1498 break;
1499 case 2:
1500 stw_p(qemu_get_ram_ptr(ram_addr), val);
1501 break;
1502 case 4:
1503 stl_p(qemu_get_ram_ptr(ram_addr), val);
1504 break;
1505 default:
1506 abort();
1507 }
Juan Quintela52159192013-10-08 12:44:04 +02001508 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1509 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
bellardf23db162005-08-21 19:12:28 +00001510 /* we remove the notdirty callback only if the code has been
1511 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001512 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001513 CPUArchState *env = current_cpu->env_ptr;
1514 tlb_set_dirty(env, env->mem_io_vaddr);
1515 }
bellard1ccde1c2004-02-06 19:46:14 +00001516}
1517
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001518static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1519 unsigned size, bool is_write)
1520{
1521 return is_write;
1522}
1523
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001524static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001525 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001526 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001527 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001528};
1529
pbrook0f459d12008-06-09 00:20:13 +00001530/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001531static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001532{
Andreas Färber4917cf42013-05-27 05:17:50 +02001533 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001534 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001535 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001536 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001537 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001538
aliguori06d55cc2008-11-18 20:24:06 +00001539 if (env->watchpoint_hit) {
1540 /* We re-entered the check after replacing the TB. Now raise
1541         * the debug interrupt so that it will trigger after the
1542 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001543 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001544 return;
1545 }
pbrook2e70f6e2008-06-29 01:03:05 +00001546 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001547 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001548 if ((vaddr == (wp->vaddr & len_mask) ||
1549 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001550 wp->flags |= BP_WATCHPOINT_HIT;
1551 if (!env->watchpoint_hit) {
1552 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001553 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001554 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1555 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001556 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001557 } else {
1558 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1559 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001560 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001561 }
aliguori06d55cc2008-11-18 20:24:06 +00001562 }
aliguori6e140f22008-11-18 20:37:55 +00001563 } else {
1564 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001565 }
1566 }
1567}
1568
pbrook6658ffb2007-03-16 23:58:11 +00001569/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1570 so these check for a hit then pass through to the normal out-of-line
1571 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001572static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001573 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001574{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001575 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1576 switch (size) {
1577 case 1: return ldub_phys(addr);
1578 case 2: return lduw_phys(addr);
1579 case 4: return ldl_phys(addr);
1580 default: abort();
1581 }
pbrook6658ffb2007-03-16 23:58:11 +00001582}
1583
Avi Kivitya8170e52012-10-23 12:30:10 +02001584static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001585 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001586{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001587 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1588 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001589 case 1:
1590 stb_phys(addr, val);
1591 break;
1592 case 2:
1593 stw_phys(addr, val);
1594 break;
1595 case 4:
1596 stl_phys(addr, val);
1597 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001598 default: abort();
1599 }
pbrook6658ffb2007-03-16 23:58:11 +00001600}
1601
Avi Kivity1ec9b902012-01-02 12:47:48 +02001602static const MemoryRegionOps watch_mem_ops = {
1603 .read = watch_mem_read,
1604 .write = watch_mem_write,
1605 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001606};
pbrook6658ffb2007-03-16 23:58:11 +00001607
Avi Kivitya8170e52012-10-23 12:30:10 +02001608static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001609 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001610{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001611 subpage_t *subpage = opaque;
1612 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001613
blueswir1db7b5422007-05-26 17:36:03 +00001614#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001615 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001616 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001617#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001618 address_space_read(subpage->as, addr + subpage->base, buf, len);
1619 switch (len) {
1620 case 1:
1621 return ldub_p(buf);
1622 case 2:
1623 return lduw_p(buf);
1624 case 4:
1625 return ldl_p(buf);
1626 default:
1627 abort();
1628 }
blueswir1db7b5422007-05-26 17:36:03 +00001629}
1630
Avi Kivitya8170e52012-10-23 12:30:10 +02001631static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001632 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001633{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001634 subpage_t *subpage = opaque;
1635 uint8_t buf[4];
1636
blueswir1db7b5422007-05-26 17:36:03 +00001637#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001638 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001639 " value %"PRIx64"\n",
1640 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001641#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001642 switch (len) {
1643 case 1:
1644 stb_p(buf, value);
1645 break;
1646 case 2:
1647 stw_p(buf, value);
1648 break;
1649 case 4:
1650 stl_p(buf, value);
1651 break;
1652 default:
1653 abort();
1654 }
1655 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001656}
1657
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001658static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001659 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001660{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001661 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001662#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001663 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001664 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001665#endif
1666
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001667 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001668 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001669}
1670
Avi Kivity70c68e42012-01-02 12:32:48 +02001671static const MemoryRegionOps subpage_ops = {
1672 .read = subpage_read,
1673 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001674 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001675 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001676};
1677
Anthony Liguoric227f092009-10-01 16:12:16 -05001678static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001679 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001680{
1681 int idx, eidx;
1682
1683 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1684 return -1;
1685 idx = SUBPAGE_IDX(start);
1686 eidx = SUBPAGE_IDX(end);
1687#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001688 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1689 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001690#endif
blueswir1db7b5422007-05-26 17:36:03 +00001691 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001692 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001693 }
1694
1695 return 0;
1696}
1697
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001698static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001699{
Anthony Liguoric227f092009-10-01 16:12:16 -05001700 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001701
Anthony Liguori7267c092011-08-20 22:09:37 -05001702 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001703
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001704 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001705 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001706 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001707 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001708 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001709#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001710 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1711 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001712#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001713 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001714
1715 return mmio;
1716}
1717
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001718static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001719{
1720 MemoryRegionSection section = {
1721 .mr = mr,
1722 .offset_within_address_space = 0,
1723 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001724 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001725 };
1726
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001727 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001728}
1729
Avi Kivitya8170e52012-10-23 12:30:10 +02001730MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001731{
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001732 return address_space_memory.dispatch->map.sections[
1733 index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001734}
1735
Avi Kivitye9179ce2009-06-14 11:38:52 +03001736static void io_mem_init(void)
1737{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001738 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1739 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001740 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001741 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001742 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001743 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001744 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001745}
1746
Avi Kivityac1970f2012-10-03 16:22:53 +02001747static void mem_begin(MemoryListener *listener)
1748{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001749 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001750 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1751 uint16_t n;
1752
1753 n = dummy_section(&d->map, &io_mem_unassigned);
1754 assert(n == PHYS_SECTION_UNASSIGNED);
1755 n = dummy_section(&d->map, &io_mem_notdirty);
1756 assert(n == PHYS_SECTION_NOTDIRTY);
1757 n = dummy_section(&d->map, &io_mem_rom);
1758 assert(n == PHYS_SECTION_ROM);
1759 n = dummy_section(&d->map, &io_mem_watch);
1760 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001761
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001762 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001763 d->as = as;
1764 as->next_dispatch = d;
1765}
1766
1767static void mem_commit(MemoryListener *listener)
1768{
1769 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001770 AddressSpaceDispatch *cur = as->dispatch;
1771 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001772
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001773 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001774
Paolo Bonzini0475d942013-05-29 12:28:21 +02001775 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001776
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001777 if (cur) {
1778 phys_sections_free(&cur->map);
1779 g_free(cur);
1780 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001781}
1782
Avi Kivity1d711482012-10-02 18:54:45 +02001783static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001784{
Andreas Färber182735e2013-05-29 22:29:20 +02001785 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001786
1787 /* since each CPU stores ram addresses in its TLB cache, we must
1788 reset the modified entries */
1789 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001790 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001791 CPUArchState *env = cpu->env_ptr;
1792
Avi Kivity117712c2012-02-12 21:23:17 +02001793 tlb_flush(env, 1);
1794 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001795}
1796
Avi Kivity93632742012-02-08 16:54:16 +02001797static void core_log_global_start(MemoryListener *listener)
1798{
Juan Quintela981fdf22013-10-10 11:54:09 +02001799 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001800}
1801
1802static void core_log_global_stop(MemoryListener *listener)
1803{
Juan Quintela981fdf22013-10-10 11:54:09 +02001804 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001805}
1806
Avi Kivity93632742012-02-08 16:54:16 +02001807static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001808 .log_global_start = core_log_global_start,
1809 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001810 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001811};
1812
Avi Kivity1d711482012-10-02 18:54:45 +02001813static MemoryListener tcg_memory_listener = {
1814 .commit = tcg_commit,
1815};
1816
Avi Kivityac1970f2012-10-03 16:22:53 +02001817void address_space_init_dispatch(AddressSpace *as)
1818{
Paolo Bonzini00752702013-05-29 12:13:54 +02001819 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001820 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001821 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001822 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001823 .region_add = mem_add,
1824 .region_nop = mem_add,
1825 .priority = 0,
1826 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001827 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001828}
1829
Avi Kivity83f3c252012-10-07 12:59:55 +02001830void address_space_destroy_dispatch(AddressSpace *as)
1831{
1832 AddressSpaceDispatch *d = as->dispatch;
1833
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001834 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001835 g_free(d);
1836 as->dispatch = NULL;
1837}
1838
Avi Kivity62152b82011-07-26 14:26:14 +03001839static void memory_map_init(void)
1840{
Anthony Liguori7267c092011-08-20 22:09:37 -05001841 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001842
Paolo Bonzini57271d62013-11-07 17:14:37 +01001843 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001844 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001845
Anthony Liguori7267c092011-08-20 22:09:37 -05001846 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001847 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1848 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001849 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001850
Avi Kivityf6790af2012-10-02 20:13:51 +02001851 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001852 if (tcg_enabled()) {
1853 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1854 }
Avi Kivity62152b82011-07-26 14:26:14 +03001855}
1856
1857MemoryRegion *get_system_memory(void)
1858{
1859 return system_memory;
1860}
1861
Avi Kivity309cb472011-08-08 16:09:03 +03001862MemoryRegion *get_system_io(void)
1863{
1864 return system_io;
1865}
1866
pbrooke2eef172008-06-08 01:09:01 +00001867#endif /* !defined(CONFIG_USER_ONLY) */
1868
bellard13eb76e2004-01-24 15:23:36 +00001869/* physical memory access (slow version, mainly for debug) */
1870#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001871int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001872 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001873{
1874 int l, flags;
1875 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001876 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001877
1878 while (len > 0) {
1879 page = addr & TARGET_PAGE_MASK;
1880 l = (page + TARGET_PAGE_SIZE) - addr;
1881 if (l > len)
1882 l = len;
1883 flags = page_get_flags(page);
1884 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001885 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001886 if (is_write) {
1887 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001888 return -1;
bellard579a97f2007-11-11 14:26:47 +00001889 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001890 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001891 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001892 memcpy(p, buf, l);
1893 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001894 } else {
1895 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001896 return -1;
bellard579a97f2007-11-11 14:26:47 +00001897 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001898 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001899 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001900 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001901 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001902 }
1903 len -= l;
1904 buf += l;
1905 addr += l;
1906 }
Paul Brooka68fe892010-03-01 00:08:59 +00001907 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001908}
bellard8df1cd02005-01-28 22:37:22 +00001909
bellard13eb76e2004-01-24 15:23:36 +00001910#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001911
Avi Kivitya8170e52012-10-23 12:30:10 +02001912static void invalidate_and_set_dirty(hwaddr addr,
1913 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001914{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001915 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001916 /* invalidate code */
1917 tb_invalidate_phys_page_range(addr, addr + length, 0);
1918 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02001919 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1920 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001921 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001922 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001923}
1924
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001925static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1926{
1927 if (memory_region_is_ram(mr)) {
1928 return !(is_write && mr->readonly);
1929 }
1930 if (memory_region_is_romd(mr)) {
1931 return !is_write;
1932 }
1933
1934 return false;
1935}
1936
Richard Henderson23326162013-07-08 14:55:59 -07001937static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001938{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001939 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001940
1941 /* Regions are assumed to support 1-4 byte accesses unless
1942 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001943 if (access_size_max == 0) {
1944 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001945 }
Richard Henderson23326162013-07-08 14:55:59 -07001946
1947 /* Bound the maximum access by the alignment of the address. */
1948 if (!mr->ops->impl.unaligned) {
1949 unsigned align_size_max = addr & -addr;
1950 if (align_size_max != 0 && align_size_max < access_size_max) {
1951 access_size_max = align_size_max;
1952 }
1953 }
1954
1955 /* Don't attempt accesses larger than the maximum. */
1956 if (l > access_size_max) {
1957 l = access_size_max;
1958 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001959 if (l & (l - 1)) {
1960 l = 1 << (qemu_fls(l) - 1);
1961 }
Richard Henderson23326162013-07-08 14:55:59 -07001962
1963 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001964}
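
/*
 * Illustration (hypothetical values): for an 8-byte access at offset 0x6 into
 * a region with max_access_size 4 and no unaligned support, the address
 * alignment (0x6 & -0x6 == 2) caps the size at 2, which is already a power of
 * two, so memory_access_size() returns 2 and the caller splits the access.
 */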
1965
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001966bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001967 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001968{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001969 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001970 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001971 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001972 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001973 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001974 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001975
bellard13eb76e2004-01-24 15:23:36 +00001976 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001977 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001978 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001979
bellard13eb76e2004-01-24 15:23:36 +00001980 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001981 if (!memory_access_is_direct(mr, is_write)) {
1982 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02001983 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00001984 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07001985 switch (l) {
1986 case 8:
1987 /* 64 bit write access */
1988 val = ldq_p(buf);
1989 error |= io_mem_write(mr, addr1, val, 8);
1990 break;
1991 case 4:
bellard1c213d12005-09-03 10:49:04 +00001992 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001993 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001994 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07001995 break;
1996 case 2:
bellard1c213d12005-09-03 10:49:04 +00001997 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001998 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001999 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002000 break;
2001 case 1:
bellard1c213d12005-09-03 10:49:04 +00002002 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002003 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002004 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002005 break;
2006 default:
2007 abort();
bellard13eb76e2004-01-24 15:23:36 +00002008 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002009 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002010 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002011 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002012 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002013 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002014 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002015 }
2016 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002017 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002018 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002019 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002020 switch (l) {
2021 case 8:
2022 /* 64 bit read access */
2023 error |= io_mem_read(mr, addr1, &val, 8);
2024 stq_p(buf, val);
2025 break;
2026 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002027 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002028 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002029 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002030 break;
2031 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002032 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002033 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002034 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002035 break;
2036 case 1:
bellard1c213d12005-09-03 10:49:04 +00002037 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002038 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002039 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002040 break;
2041 default:
2042 abort();
bellard13eb76e2004-01-24 15:23:36 +00002043 }
2044 } else {
2045 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002046 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002047 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002048 }
2049 }
2050 len -= l;
2051 buf += l;
2052 addr += l;
2053 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002054
2055 return error;
bellard13eb76e2004-01-24 15:23:36 +00002056}
bellard8df1cd02005-01-28 22:37:22 +00002057
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002058bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002059 const uint8_t *buf, int len)
2060{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002061 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002062}
2063
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002064bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002065{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002066 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002067}
2068
2069
Avi Kivitya8170e52012-10-23 12:30:10 +02002070void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002071 int len, int is_write)
2072{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002073 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002074}
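
/*
 * Illustrative sketch (not built): DMA-style access to guest-physical memory
 * through the copying interface above.  The guest address 0x1000 and the
 * buffer size are hypothetical.
 */
#if 0
static void example_dma_rw(void)
{
    uint8_t buf[64];
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);  /* read guest memory */
    buf[0] ^= 1;
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);  /* write it back */
}
#endif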
2075
Alexander Graf582b55a2013-12-11 14:17:44 +01002076enum write_rom_type {
2077 WRITE_DATA,
2078 FLUSH_CACHE,
2079};
2080
2081static inline void cpu_physical_memory_write_rom_internal(
2082 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002083{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002084 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002085 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002086 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002087 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002088
bellardd0ecd2a2006-04-23 17:14:48 +00002089 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002090 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002091 mr = address_space_translate(&address_space_memory,
2092 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002093
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002094 if (!(memory_region_is_ram(mr) ||
2095 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002096 /* do nothing */
2097 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002098 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002099 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002100 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002101 switch (type) {
2102 case WRITE_DATA:
2103 memcpy(ptr, buf, l);
2104 invalidate_and_set_dirty(addr1, l);
2105 break;
2106 case FLUSH_CACHE:
2107 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2108 break;
2109 }
bellardd0ecd2a2006-04-23 17:14:48 +00002110 }
2111 len -= l;
2112 buf += l;
2113 addr += l;
2114 }
2115}
2116
Alexander Graf582b55a2013-12-11 14:17:44 +01002117/* used for ROM loading : can write in RAM and ROM */
2118void cpu_physical_memory_write_rom(hwaddr addr,
2119 const uint8_t *buf, int len)
2120{
2121 cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
2122}
2123
2124void cpu_flush_icache_range(hwaddr start, int len)
2125{
2126 /*
2127 * This function should do the same thing as an icache flush that was
2128 * triggered from within the guest. For TCG we are always cache coherent,
2129 * so there is no need to flush anything. For KVM / Xen we need to flush
2130 * the host's instruction cache at least.
2131 */
2132 if (tcg_enabled()) {
2133 return;
2134 }
2135
2136 cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
2137}
2138
aliguori6d16c2f2009-01-22 16:59:11 +00002139typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002140 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002141 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002142 hwaddr addr;
2143 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002144} BounceBuffer;
2145
2146static BounceBuffer bounce;
2147
aliguoriba223c22009-01-22 16:59:16 +00002148typedef struct MapClient {
2149 void *opaque;
2150 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002151 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002152} MapClient;
2153
Blue Swirl72cf2d42009-09-12 07:36:22 +00002154static QLIST_HEAD(map_client_list, MapClient) map_client_list
2155 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002156
2157void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2158{
Anthony Liguori7267c092011-08-20 22:09:37 -05002159 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002160
2161 client->opaque = opaque;
2162 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002163 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002164 return client;
2165}
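
/*
 * Illustrative sketch (hypothetical callback and request): a caller whose
 * address_space_map() failed because the bounce buffer was busy can register
 * a map client; the callback runs once the buffer is released and can retry.
 */
#if 0
static void example_map_retry(void *opaque)
{
    /* opaque identifies the stalled request; re-issue address_space_map() */
}

static void example_start_dma(void *req)        /* "req" is hypothetical */
{
    cpu_register_map_client(req, example_map_retry);
}
#endif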
2166
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002167static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002168{
2169 MapClient *client = (MapClient *)_client;
2170
Blue Swirl72cf2d42009-09-12 07:36:22 +00002171 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002172 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002173}
2174
2175static void cpu_notify_map_clients(void)
2176{
2177 MapClient *client;
2178
Blue Swirl72cf2d42009-09-12 07:36:22 +00002179 while (!QLIST_EMPTY(&map_client_list)) {
2180 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002181 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002182 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002183 }
2184}
2185
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002186bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2187{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002188 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002189 hwaddr l, xlat;
2190
2191 while (len > 0) {
2192 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002193 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2194 if (!memory_access_is_direct(mr, is_write)) {
2195 l = memory_access_size(mr, l, addr);
2196 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002197 return false;
2198 }
2199 }
2200
2201 len -= l;
2202 addr += l;
2203 }
2204 return true;
2205}
2206
aliguori6d16c2f2009-01-22 16:59:11 +00002207/* Map a physical memory region into a host virtual address.
2208 * May map a subset of the requested range, given by and returned in *plen.
2209 * May return NULL if resources needed to perform the mapping are exhausted.
2210 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002211 * Use cpu_register_map_client() to know when retrying the map operation is
2212 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002213 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002214void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002215 hwaddr addr,
2216 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002217 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002218{
Avi Kivitya8170e52012-10-23 12:30:10 +02002219 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002220 hwaddr done = 0;
2221 hwaddr l, xlat, base;
2222 MemoryRegion *mr, *this_mr;
2223 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002224
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002225 if (len == 0) {
2226 return NULL;
2227 }
aliguori6d16c2f2009-01-22 16:59:11 +00002228
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002229 l = len;
2230 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2231 if (!memory_access_is_direct(mr, is_write)) {
2232 if (bounce.buffer) {
2233 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002234 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002235 /* Avoid unbounded allocations */
2236 l = MIN(l, TARGET_PAGE_SIZE);
2237 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002238 bounce.addr = addr;
2239 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002240
2241 memory_region_ref(mr);
2242 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002243 if (!is_write) {
2244 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002245 }
aliguori6d16c2f2009-01-22 16:59:11 +00002246
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002247 *plen = l;
2248 return bounce.buffer;
2249 }
2250
2251 base = xlat;
2252 raddr = memory_region_get_ram_addr(mr);
2253
2254 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002255 len -= l;
2256 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002257 done += l;
2258 if (len == 0) {
2259 break;
2260 }
2261
2262 l = len;
2263 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2264 if (this_mr != mr || xlat != base + done) {
2265 break;
2266 }
aliguori6d16c2f2009-01-22 16:59:11 +00002267 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002268
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002269 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002270 *plen = done;
2271 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002272}
2273
Avi Kivityac1970f2012-10-03 16:22:53 +02002274/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002275 * Will also mark the memory as dirty if is_write == 1. access_len gives
2276 * the amount of memory that was actually read or written by the caller.
2277 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002278void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2279 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002280{
2281 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002282 MemoryRegion *mr;
2283 ram_addr_t addr1;
2284
2285 mr = qemu_ram_addr_from_host(buffer, &addr1);
2286 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002287 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002288 while (access_len) {
2289 unsigned l;
2290 l = TARGET_PAGE_SIZE;
2291 if (l > access_len)
2292 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002293 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002294 addr1 += l;
2295 access_len -= l;
2296 }
2297 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002298 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002299 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002300 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002301 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002302 return;
2303 }
2304 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002305 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002306 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002307 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002308 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002309 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002310 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002311}
bellardd0ecd2a2006-04-23 17:14:48 +00002312
Avi Kivitya8170e52012-10-23 12:30:10 +02002313void *cpu_physical_memory_map(hwaddr addr,
2314 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002315 int is_write)
2316{
2317 return address_space_map(&address_space_memory, addr, plen, is_write);
2318}
2319
Avi Kivitya8170e52012-10-23 12:30:10 +02002320void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2321 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002322{
2323 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2324}
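
/*
 * Illustrative sketch (not built): zero-copy access via map/unmap.  MMIO
 * falls back to a single bounce buffer, so the mapping may be shorter than
 * requested or fail while the buffer is in use; "gpa" is hypothetical.
 */
#if 0
static void example_map_unmap(hwaddr gpa)
{
    hwaddr len = 4096;
    void *p = cpu_physical_memory_map(gpa, &len, 1 /* is_write */);
    if (p) {
        memset(p, 0, len);                      /* len may have shrunk */
        cpu_physical_memory_unmap(p, len, 1, len);
    } else {
        /* out of resources: cpu_register_map_client() and retry later */
    }
}
#endif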
2325
bellard8df1cd02005-01-28 22:37:22 +00002326/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002327static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002328 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002329{
bellard8df1cd02005-01-28 22:37:22 +00002330 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002331 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002332 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002333 hwaddr l = 4;
2334 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002335
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002336 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2337 false);
2338 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002339 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002340 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002341#if defined(TARGET_WORDS_BIGENDIAN)
2342 if (endian == DEVICE_LITTLE_ENDIAN) {
2343 val = bswap32(val);
2344 }
2345#else
2346 if (endian == DEVICE_BIG_ENDIAN) {
2347 val = bswap32(val);
2348 }
2349#endif
bellard8df1cd02005-01-28 22:37:22 +00002350 } else {
2351 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002352 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002353 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002354 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002355 switch (endian) {
2356 case DEVICE_LITTLE_ENDIAN:
2357 val = ldl_le_p(ptr);
2358 break;
2359 case DEVICE_BIG_ENDIAN:
2360 val = ldl_be_p(ptr);
2361 break;
2362 default:
2363 val = ldl_p(ptr);
2364 break;
2365 }
bellard8df1cd02005-01-28 22:37:22 +00002366 }
2367 return val;
2368}
2369
Avi Kivitya8170e52012-10-23 12:30:10 +02002370uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002371{
2372 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2373}
2374
Avi Kivitya8170e52012-10-23 12:30:10 +02002375uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002376{
2377 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2378}
2379
Avi Kivitya8170e52012-10-23 12:30:10 +02002380uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002381{
2382 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2383}
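
/* A small illustration of the three ldl variants above (a sketch, not part of
 * the original file).  DEVICE_NATIVE_ENDIAN means the target's byte order, so
 * for a guest structure laid out in little-endian format the _le variant is
 * used regardless of host or target endianness; 'desc_pa' is a placeholder
 * guest-physical address:
 *
 *     uint32_t flags = ldl_le_phys(desc_pa);        // always little-endian
 *     uint32_t raw   = ldl_phys(desc_pa);           // target byte order
 *
 * The same naming convention applies to the lduw/ldq and stw/stl/stq helpers
 * further down in this file.
 */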
2384
bellard84b7b8e2005-11-28 21:19:04 +00002385/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002386static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002387 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002388{
bellard84b7b8e2005-11-28 21:19:04 +00002389 uint8_t *ptr;
2390 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002391 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002392 hwaddr l = 8;
2393 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002394
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002395 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2396 false);
2397 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002398 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002399 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002400#if defined(TARGET_WORDS_BIGENDIAN)
2401 if (endian == DEVICE_LITTLE_ENDIAN) {
2402 val = bswap64(val);
2403 }
2404#else
2405 if (endian == DEVICE_BIG_ENDIAN) {
2406 val = bswap64(val);
2407 }
2408#endif
bellard84b7b8e2005-11-28 21:19:04 +00002409 } else {
2410 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002411 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002412 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002413 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002414 switch (endian) {
2415 case DEVICE_LITTLE_ENDIAN:
2416 val = ldq_le_p(ptr);
2417 break;
2418 case DEVICE_BIG_ENDIAN:
2419 val = ldq_be_p(ptr);
2420 break;
2421 default:
2422 val = ldq_p(ptr);
2423 break;
2424 }
bellard84b7b8e2005-11-28 21:19:04 +00002425 }
2426 return val;
2427}
2428
Avi Kivitya8170e52012-10-23 12:30:10 +02002429uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002430{
2431 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2432}
2433
Avi Kivitya8170e52012-10-23 12:30:10 +02002434uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002435{
2436 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2437}
2438
Avi Kivitya8170e52012-10-23 12:30:10 +02002439uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002440{
2441 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2442}
2443
bellardaab33092005-10-30 20:48:42 +00002444/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002445uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002446{
2447 uint8_t val;
2448 cpu_physical_memory_read(addr, &val, 1);
2449 return val;
2450}
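
/* ldub_phys() needs no endian-specific variants: a single byte has no byte
 * order, so the generic cpu_physical_memory_read() path above is sufficient.
 * The "XXX: optimize" note presumably refers to avoiding that generic
 * read/write path, not to endianness handling. */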
2451
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002452/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002453static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002454 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002455{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002456 uint8_t *ptr;
2457 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002458 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002459 hwaddr l = 2;
2460 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002461
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002462 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2463 false);
2464 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002465 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002466 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002467#if defined(TARGET_WORDS_BIGENDIAN)
2468 if (endian == DEVICE_LITTLE_ENDIAN) {
2469 val = bswap16(val);
2470 }
2471#else
2472 if (endian == DEVICE_BIG_ENDIAN) {
2473 val = bswap16(val);
2474 }
2475#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002476 } else {
2477 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002478 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002479 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002480 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002481 switch (endian) {
2482 case DEVICE_LITTLE_ENDIAN:
2483 val = lduw_le_p(ptr);
2484 break;
2485 case DEVICE_BIG_ENDIAN:
2486 val = lduw_be_p(ptr);
2487 break;
2488 default:
2489 val = lduw_p(ptr);
2490 break;
2491 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002492 }
2493 return val;
bellardaab33092005-10-30 20:48:42 +00002494}
2495
Avi Kivitya8170e52012-10-23 12:30:10 +02002496uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002497{
2498 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2499}
2500
Avi Kivitya8170e52012-10-23 12:30:10 +02002501uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002502{
2503 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2504}
2505
Avi Kivitya8170e52012-10-23 12:30:10 +02002506uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002507{
2508 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2509}
2510
bellard8df1cd02005-01-28 22:37:22 +00002511/* warning: addr must be aligned. The RAM page is not marked as dirty
2512 and the code inside it is not invalidated. This is useful when the dirty
2513 bits are used to track modified PTEs. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002514void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002515{
bellard8df1cd02005-01-28 22:37:22 +00002516 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002517 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002518 hwaddr l = 4;
2519 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002520
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002521 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2522 true);
2523 if (l < 4 || !memory_access_is_direct(mr, true)) {
2524 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002525 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002526 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002527 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002528 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002529
2530 if (unlikely(in_migration)) {
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002531 if (cpu_physical_memory_is_clean(addr1)) {
aliguori74576192008-10-06 14:02:03 +00002532 /* invalidate code */
2533 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2534 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02002535 cpu_physical_memory_set_dirty_flag(addr1,
2536 DIRTY_MEMORY_MIGRATION);
2537 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
aliguori74576192008-10-06 14:02:03 +00002538 }
2539 }
bellard8df1cd02005-01-28 22:37:22 +00002540 }
2541}
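
/* Typical use of stl_phys_notdirty() (an illustrative sketch, not part of the
 * original file): target MMU emulation that sets accessed/dirty bits in a
 * guest page table entry wants the store to reach RAM without flagging the
 * page as dirty for migration/VGA or invalidating translated code, e.g.:
 *
 *     pte |= PG_ACCESSED_MASK;       // placeholder flag name
 *     stl_phys_notdirty(pte_addr, pte);
 *
 * For ordinary device or CPU stores, use stl_phys() and friends below, which
 * do update the dirty bitmap.
 */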
2542
2543/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002544static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002545 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002546{
bellard8df1cd02005-01-28 22:37:22 +00002547 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002548 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002549 hwaddr l = 4;
2550 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002551
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002552 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2553 true);
2554 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002555#if defined(TARGET_WORDS_BIGENDIAN)
2556 if (endian == DEVICE_LITTLE_ENDIAN) {
2557 val = bswap32(val);
2558 }
2559#else
2560 if (endian == DEVICE_BIG_ENDIAN) {
2561 val = bswap32(val);
2562 }
2563#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002564 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002565 } else {
bellard8df1cd02005-01-28 22:37:22 +00002566 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002567 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002568 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002569 switch (endian) {
2570 case DEVICE_LITTLE_ENDIAN:
2571 stl_le_p(ptr, val);
2572 break;
2573 case DEVICE_BIG_ENDIAN:
2574 stl_be_p(ptr, val);
2575 break;
2576 default:
2577 stl_p(ptr, val);
2578 break;
2579 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002580 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002581 }
2582}
2583
Avi Kivitya8170e52012-10-23 12:30:10 +02002584void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002585{
2586 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2587}
2588
Avi Kivitya8170e52012-10-23 12:30:10 +02002589void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002590{
2591 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2592}
2593
Avi Kivitya8170e52012-10-23 12:30:10 +02002594void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002595{
2596 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2597}
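
/* Unlike stl_phys_notdirty(), the stl_phys*() helpers above go through
 * invalidate_and_set_dirty(), so any translated code in the page is thrown
 * away and the dirty bits are updated for the stored bytes. */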
2598
bellardaab33092005-10-30 20:48:42 +00002599/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002600void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002601{
2602 uint8_t v = val;
2603 cpu_physical_memory_write(addr, &v, 1);
2604}
2605
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002606/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002607static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002608 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002609{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002610 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002611 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002612 hwaddr l = 2;
2613 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002614
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002615 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2616 true);
2617 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002618#if defined(TARGET_WORDS_BIGENDIAN)
2619 if (endian == DEVICE_LITTLE_ENDIAN) {
2620 val = bswap16(val);
2621 }
2622#else
2623 if (endian == DEVICE_BIG_ENDIAN) {
2624 val = bswap16(val);
2625 }
2626#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002627 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002628 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002629 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002630 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002631 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002632 switch (endian) {
2633 case DEVICE_LITTLE_ENDIAN:
2634 stw_le_p(ptr, val);
2635 break;
2636 case DEVICE_BIG_ENDIAN:
2637 stw_be_p(ptr, val);
2638 break;
2639 default:
2640 stw_p(ptr, val);
2641 break;
2642 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002643 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002644 }
bellardaab33092005-10-30 20:48:42 +00002645}
2646
Avi Kivitya8170e52012-10-23 12:30:10 +02002647void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002648{
2649 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2650}
2651
Avi Kivitya8170e52012-10-23 12:30:10 +02002652void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002653{
2654 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2655}
2656
Avi Kivitya8170e52012-10-23 12:30:10 +02002657void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002658{
2659 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2660}
2661
bellardaab33092005-10-30 20:48:42 +00002662/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002663void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002664{
2665 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002666 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002667}
2668
Avi Kivitya8170e52012-10-23 12:30:10 +02002669void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002670{
2671 val = cpu_to_le64(val);
2672 cpu_physical_memory_write(addr, &val, 8);
2673}
2674
Avi Kivitya8170e52012-10-23 12:30:10 +02002675void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002676{
2677 val = cpu_to_be64(val);
2678 cpu_physical_memory_write(addr, &val, 8);
2679}
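
/* Note on the 64-bit stores above: stq_phys() uses tswap64(), i.e. the value
 * is converted to the target's native byte order before being written, while
 * stq_le_phys()/stq_be_phys() force little- and big-endian layout explicitly,
 * matching the ldq_*_phys() readers earlier in the file. */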
2680
aliguori5e2972f2009-03-28 17:51:36 +00002681/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002682int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002683 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002684{
2685 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002686 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002687 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002688
2689 while (len > 0) {
2690 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002691 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002692 /* if no physical page mapped, return an error */
2693 if (phys_addr == -1)
2694 return -1;
2695 l = (page + TARGET_PAGE_SIZE) - addr;
2696 if (l > len)
2697 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002698 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002699 if (is_write)
2700 cpu_physical_memory_write_rom(phys_addr, buf, l);
2701 else
aliguori5e2972f2009-03-28 17:51:36 +00002702 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002703 len -= l;
2704 buf += l;
2705 addr += l;
2706 }
2707 return 0;
2708}
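
/* Illustrative use of cpu_memory_rw_debug() (a sketch, not part of the
 * original file): debug accessors such as the gdb stub read or patch guest
 * virtual memory for a given CPU, letting this helper do the per-page
 * virtual-to-physical translation:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         // address not mapped in the guest page tables
 *     }
 *
 * 'cpu' and 'pc' are placeholder names; a non-zero final argument writes
 * (including to ROM) instead of reading.
 */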
Paul Brooka68fe892010-03-01 00:08:59 +00002709#endif
bellard13eb76e2004-01-24 15:23:36 +00002710
Blue Swirl8e4a4242013-01-06 18:30:17 +00002711#if !defined(CONFIG_USER_ONLY)
2712
2713/*
2714 * A helper function for the _utterly broken_ virtio device model to find out if
2715 * it's running on a big endian machine. Don't do this at home kids!
2716 */
2717bool virtio_is_big_endian(void);
2718bool virtio_is_big_endian(void)
2719{
2720#if defined(TARGET_WORDS_BIGENDIAN)
2721 return true;
2722#else
2723 return false;
2724#endif
2725}
2726
2727#endif
2728
Wen Congyang76f35532012-05-07 12:04:18 +08002729#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002730bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002731{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002732 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002733 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002734
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002735 mr = address_space_translate(&address_space_memory,
2736 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002737
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002738 return !(memory_region_is_ram(mr) ||
2739 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002740}
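
/* cpu_physical_memory_is_io() above reports true only when the translated
 * region is neither RAM nor ROMD, i.e. plain MMIO.  Callers such as the
 * memory dump code can use it to skip addresses with no stable backing
 * contents; an illustrative check (placeholder names):
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, len);
 *     }
 */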
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002741
2742void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2743{
2744 RAMBlock *block;
2745
2746 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2747 func(block->host, block->offset, block->length, opaque);
2748 }
2749}
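
/* A sketch of a qemu_ram_foreach_block() callback (illustrative only, not
 * part of the original file), assuming the RAMBlockIterFunc typedef matches
 * the (host, offset, length, opaque) call made above:
 *
 *     static void count_ram(void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         uint64_t *total = opaque;
 *         *total += length;       // host pointer and offset unused here
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram, &total);
 */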
Peter Maydellec3f8c92013-06-27 20:53:38 +01002750#endif