/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* Number of levels to skip down to the next node (in units of L2_SIZE);
     * 0 for a leaf. */
    uint16_t skip : 1;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint16_t ptr : 15;
};

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS

#define P_L2_BITS 10
#define P_L2_SIZE (1 << P_L2_BITS)

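/* Number of levels in the multi-level map.  As a worked example (assuming
 * a target with a 52-bit physical address space and 4 KiB pages, i.e.
 * ADDR_SPACE_BITS == 52 and TARGET_PAGE_BITS == 12), this evaluates to
 * ((52 - 12 - 1) / 10) + 1 == 4 levels of 1024-entry tables, enough to
 * cover the 40-bit page frame number.
 */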
#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

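/* Grow the preallocated node pool so that at least 'nodes' more nodes can
 * be handed out by phys_map_node_alloc() without reallocating.  The pool
 * grows geometrically (at least doubling) to keep g_renew() calls rare.
 */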
static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        next_map.nodes[ret][i].skip = 1;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

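/* Recursively fill in the map for [*index, *index + *nb) pages at this
 * level.  Ranges that are aligned to and span a whole 'step' worth of
 * pages become leaves here; anything smaller descends one level.
 */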
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

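/* Walk the multi-level map and return the MemoryRegionSection covering
 * 'index' (a page number).  Missing nodes resolve to the unassigned
 * section rather than NULL, so callers never see a missing mapping.
 */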
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

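/* Translate 'addr' in 'as' into a MemoryRegion plus offset, following
 * IOMMUs into their target address space until a terminal region is
 * reached.  *plen is clamped to what the translation allows; an access
 * the IOMMU forbids resolves to io_mem_unassigned.
 */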
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
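/* Find the RAMBlock containing 'addr', checking the most-recently-used
 * block first since lookups cluster heavily.  Aborts on an unmapped
 * offset, which would indicate a bug in the caller.
 */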
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

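/* Append a copy of '*section' to the section table being built for the
 * next dispatch map, taking a reference on its MemoryRegion, and return
 * the index used by the page table and the iotlb.
 */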
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

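/* Map a section smaller than a page.  The page it lands on gets a
 * subpage_t container (created on first use) whose sub_section[] array
 * dispatches each byte within the page to the right section.
 */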
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

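/* MemoryListener callback: add 'section' to the dispatch map under
 * construction, splitting it into an unaligned head, a page-aligned
 * middle registered as whole pages, and an unaligned tail.
 */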
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

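/* Back a RAMBlock with a file on a hugetlbfs mount at 'path'.  A
 * temporary file is created and immediately unlinked, so it lives only
 * as long as the mapping.  With -mem-prealloc, every page is touched up
 * front, with SIGBUS trapped to report allocation failure cleanly.
 */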
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory / hpagesize) - 1; i++) {
            memset(area + (hpagesize * i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

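/* Choose a ram_addr_t offset for a new block of 'size' bytes by scanning
 * the existing blocks for the smallest gap that fits (a best-fit search,
 * which keeps the offset space compact).
 */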
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

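/* Allocate a new RAMBlock of 'size' bytes.  The backing memory comes, in
 * order of precedence, from: a caller-provided 'host' pointer, the Xen
 * allocator, a hugetlbfs file when -mem-path is given, or phys_mem_alloc
 * (anonymous memory by default).  Returns the block's ram_addr_t offset.
 */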
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001228}
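
/*
 * Illustrative sketch, not part of the original file: memory_region_init_ram()
 * in memory.c is the usual caller of qemu_ram_alloc(); conceptually it does
 * something like the following (field assignments abbreviated, destructor
 * setup omitted).
 */
#if 0
static void example_init_ram(MemoryRegion *mr, const char *name, uint64_t size)
{
    memory_region_init(mr, NULL, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(size, mr);    /* reserve + back the region */
}
#endif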

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
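
/*
 * Illustrative sketch, not part of the original file: a display device that
 * owns its VRAM block may touch it directly, as the comment above permits.
 * The helper name and parameters are hypothetical.
 */
#if 0
static void example_vram_fill(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset); /* host view of the block */

    memset(vram, 0, vram_size);                    /* stays within the block */
}
#endif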

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
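
/*
 * Illustrative sketch, not part of the original file: the host pointer /
 * ram_addr_t mapping round-trips, which is what the softmmu slow path
 * relies on.  The helper name is hypothetical.
 */
#if 0
static void example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &back);

    assert(mr != NULL && back == addr);
}
#endif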

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
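
/*
 * Illustrative sketch, not part of the original file: a debugger stub would
 * arm a watchpoint with cpu_watchpoint_insert(); accesses to the watched
 * range are then routed through io_mem_watch and land in check_watchpoint()
 * above.  The address and length below are hypothetical, and the exact
 * insert signature is assumed from this era of the tree.
 */
#if 0
static void example_arm_watchpoint(CPUArchState *env)
{
    CPUWatchpoint *wp;

    /* Trap writes to a 4-byte guest-virtual range before they complete. */
    cpu_watchpoint_insert(env, 0x1000, 4,
                          BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp);
}
#endif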

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}

static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    assert(ADDR_SPACE_BITS <= 64);

    memory_region_init(system_memory, NULL, "system",
                       ADDR_SPACE_BITS == 64 ?
                       UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS));
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    if (tcg_enabled()) {
        memory_listener_register(&tcg_memory_listener, &address_space_memory);
    }
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
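
/*
 * Illustrative sketch, not part of the original file: board code obtains the
 * root containers above and attaches RAM or device regions to them.  The
 * region name, size and base address below are hypothetical.
 */
#if 0
static void example_board_init(MemoryRegion *ram)
{
    memory_region_init_ram(ram, NULL, "board.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0x0, ram);
}
#endif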

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
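
/*
 * Worked example (not in the original source): for a region whose
 * max_access_size is unset, a request of l = 6 at addr = 0x1002 is first
 * capped at 4 (the default), then the alignment bound addr & -addr = 2
 * shrinks it to 2, and the power-of-two rounding leaves 2.  The caller
 * then loops for the remaining bytes.
 */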

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
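
/*
 * Illustrative sketch, not part of the original file: copying a buffer into
 * guest-physical memory and reading it back through the slow path.  The
 * helper name and guest address are hypothetical.
 */
#if 0
static void example_phys_copy(void)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    cpu_physical_memory_rw(0x100000, out, sizeof(out), 1);  /* write */
    cpu_physical_memory_rw(0x100000, in, sizeof(in), 0);    /* read */
}
#endif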

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
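
/*
 * Illustrative sketch, not part of the original file: when the single bounce
 * buffer is busy, address_space_map() returns NULL; a caller can register a
 * map client to be notified once unmapping frees the buffer, then retry.
 * "ExampleReq" and the helper names are hypothetical.
 */
#if 0
typedef struct ExampleReq { hwaddr addr; hwaddr len; } ExampleReq;

static void example_retry_cb(void *opaque);

static void *example_try_map(ExampleReq *req)
{
    void *p = cpu_physical_memory_map(req->addr, &req->len, 1);
    if (!p) {
        cpu_register_map_client(req, example_retry_cb); /* retry later */
    }
    return p;
}

static void example_retry_cb(void *opaque)
{
    example_try_map(opaque);
}
#endif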

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
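
/*
 * Illustrative sketch, not part of the original file: the map/unmap protocol
 * for zero-copy DMA.  The mapping may come back shorter than requested, so
 * real callers loop; the helper name, address and length are hypothetical.
 */
#if 0
static void example_dma_write(hwaddr addr, const uint8_t *data, hwaddr len)
{
    hwaddr plen = len;
    void *p = cpu_physical_memory_map(addr, &plen, 1);

    if (p) {
        memcpy(p, data, plen);                       /* may be < len */
        cpu_physical_memory_unmap(p, plen, 1, plen); /* marks pages dirty */
    }
}
#endif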

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
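
/*
 * Worked example (not in the original source): if guest RAM at the target
 * address holds the bytes 0x12 0x34 0x56 0x78, ldl_le_phys() returns
 * 0x78563412 and ldl_be_phys() returns 0x12345678, regardless of the host
 * byte order; ldl_phys() returns whichever matches the target's natural
 * endianness.
 */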
2302
bellard84b7b8e2005-11-28 21:19:04 +00002303/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002304static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002305 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002306{
bellard84b7b8e2005-11-28 21:19:04 +00002307 uint8_t *ptr;
2308 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002309 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002310 hwaddr l = 8;
2311 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002312
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002313 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2314 false);
2315 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002316 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002317 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002318#if defined(TARGET_WORDS_BIGENDIAN)
2319 if (endian == DEVICE_LITTLE_ENDIAN) {
2320 val = bswap64(val);
2321 }
2322#else
2323 if (endian == DEVICE_BIG_ENDIAN) {
2324 val = bswap64(val);
2325 }
2326#endif
bellard84b7b8e2005-11-28 21:19:04 +00002327 } else {
2328 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002329 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002330 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002331 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002332 switch (endian) {
2333 case DEVICE_LITTLE_ENDIAN:
2334 val = ldq_le_p(ptr);
2335 break;
2336 case DEVICE_BIG_ENDIAN:
2337 val = ldq_be_p(ptr);
2338 break;
2339 default:
2340 val = ldq_p(ptr);
2341 break;
2342 }
bellard84b7b8e2005-11-28 21:19:04 +00002343 }
2344 return val;
2345}
2346
Avi Kivitya8170e52012-10-23 12:30:10 +02002347uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002348{
2349 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2350}
2351
Avi Kivitya8170e52012-10-23 12:30:10 +02002352uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002353{
2354 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2355}
2356
Avi Kivitya8170e52012-10-23 12:30:10 +02002357uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002358{
2359 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2360}
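/*
 * Example (a sketch; pte_addr is a hypothetical name): the 64-bit loads
 * follow the same pattern, e.g. fetching a page-table entry that the
 * target architecture defines as little-endian:
 *
 *     uint64_t pte = ldq_le_phys(pte_addr);
 */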
2361
bellardaab33092005-10-30 20:48:42 +00002362/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002363uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002364{
2365 uint8_t val;
2366 cpu_physical_memory_read(addr, &val, 1);
2367 return val;
2368}
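/*
 * One way the XXX above could be resolved (an untested sketch mirroring
 * lduw_phys_internal() with l = 1; a single byte needs no endian
 * handling):
 *
 *     MemoryRegion *mr;
 *     hwaddr l = 1, addr1;
 *     uint64_t val;
 *
 *     mr = address_space_translate(&address_space_memory, addr, &addr1,
 *                                  &l, false);
 *     if (!memory_access_is_direct(mr, false)) {
 *         io_mem_read(mr, addr1, &val, 1);
 *     } else {
 *         val = ldub_p(qemu_get_ram_ptr(
 *                   (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
 *                   + addr1));
 *     }
 *     return val;
 */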
2369
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002370/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002371static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002372 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002373{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002374 uint8_t *ptr;
2375 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002376 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002377 hwaddr l = 2;
2378 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002379
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002380 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2381 false);
2382 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002383 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002384 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002385#if defined(TARGET_WORDS_BIGENDIAN)
2386 if (endian == DEVICE_LITTLE_ENDIAN) {
2387 val = bswap16(val);
2388 }
2389#else
2390 if (endian == DEVICE_BIG_ENDIAN) {
2391 val = bswap16(val);
2392 }
2393#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002394 } else {
2395 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002396 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002397 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002398 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002399 switch (endian) {
2400 case DEVICE_LITTLE_ENDIAN:
2401 val = lduw_le_p(ptr);
2402 break;
2403 case DEVICE_BIG_ENDIAN:
2404 val = lduw_be_p(ptr);
2405 break;
2406 default:
2407 val = lduw_p(ptr);
2408 break;
2409 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002410 }
2411 return val;
bellardaab33092005-10-30 20:48:42 +00002412}
2413
Avi Kivitya8170e52012-10-23 12:30:10 +02002414uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002415{
2416 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2417}
2418
Avi Kivitya8170e52012-10-23 12:30:10 +02002419uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002420{
2421 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2422}
2423
Avi Kivitya8170e52012-10-23 12:30:10 +02002424uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002425{
2426 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2427}
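/*
 * Example (a sketch; the address name is hypothetical): legacy virtio
 * accesses its rings in the guest's native order, so a 16-bit ring index
 * would be read with the plain variant:
 *
 *     uint16_t avail_idx = lduw_phys(vring_avail_idx_addr);
 */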
2428
/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside it is not invalidated. This is useful when the
   dirty bits are used to track modified PTEs. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002432void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002433{
bellard8df1cd02005-01-28 22:37:22 +00002434 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002435 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002436 hwaddr l = 4;
2437 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002438
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002439 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2440 true);
2441 if (l < 4 || !memory_access_is_direct(mr, true)) {
2442 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002443 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002444 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002445 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002446 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002447
2448 if (unlikely(in_migration)) {
2449 if (!cpu_physical_memory_is_dirty(addr1)) {
2450 /* invalidate code */
2451 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2452 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002453 cpu_physical_memory_set_dirty_flags(
2454 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002455 }
2456 }
bellard8df1cd02005-01-28 22:37:22 +00002457 }
2458}
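/*
 * Example (a sketch modelled on the i386 MMU; the names are from
 * target-i386): when the guest's page tables live in RAM, accessed/dirty
 * bits can be updated in place without disturbing the dirty bitmap,
 * which should keep tracking guest-initiated writes only:
 *
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(pte_addr, pte);
 */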
2459
2460/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002461static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002462 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002463{
bellard8df1cd02005-01-28 22:37:22 +00002464 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002465 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002466 hwaddr l = 4;
2467 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002468
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002469 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2470 true);
2471 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002472#if defined(TARGET_WORDS_BIGENDIAN)
2473 if (endian == DEVICE_LITTLE_ENDIAN) {
2474 val = bswap32(val);
2475 }
2476#else
2477 if (endian == DEVICE_BIG_ENDIAN) {
2478 val = bswap32(val);
2479 }
2480#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002481 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002482 } else {
bellard8df1cd02005-01-28 22:37:22 +00002483 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002484 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002485 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002486 switch (endian) {
2487 case DEVICE_LITTLE_ENDIAN:
2488 stl_le_p(ptr, val);
2489 break;
2490 case DEVICE_BIG_ENDIAN:
2491 stl_be_p(ptr, val);
2492 break;
2493 default:
2494 stl_p(ptr, val);
2495 break;
2496 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002497 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002498 }
2499}
2500
Avi Kivitya8170e52012-10-23 12:30:10 +02002501void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002502{
2503 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2504}
2505
Avi Kivitya8170e52012-10-23 12:30:10 +02002506void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002507{
2508 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2509}
2510
Avi Kivitya8170e52012-10-23 12:30:10 +02002511void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002512{
2513 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2514}
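/*
 * Example (a sketch; the address is hypothetical): the store side mirrors
 * the loads, e.g. ringing a doorbell register that a device specifies as
 * little-endian:
 *
 *     stl_le_phys(doorbell_addr, 1);
 *
 * Unlike stl_phys_notdirty(), these variants call
 * invalidate_and_set_dirty(), so self-modifying code detection and the
 * dirty bitmap both see the update.
 */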
2515
bellardaab33092005-10-30 20:48:42 +00002516/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002517void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002518{
2519 uint8_t v = val;
2520 cpu_physical_memory_write(addr, &v, 1);
2521}
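/*
 * The store-side XXX could be resolved like the ldub_phys() sketch above
 * (untested): translate once, then for the I/O case
 *
 *     io_mem_write(mr, addr1, val, 1);
 *
 * and for the RAM case
 *
 *     addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
 *     stb_p(qemu_get_ram_ptr(addr1), val);
 *     invalidate_and_set_dirty(addr1, 1);
 */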
2522
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002523/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002524static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002525 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002526{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002527 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002528 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002529 hwaddr l = 2;
2530 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002531
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002532 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2533 true);
2534 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002535#if defined(TARGET_WORDS_BIGENDIAN)
2536 if (endian == DEVICE_LITTLE_ENDIAN) {
2537 val = bswap16(val);
2538 }
2539#else
2540 if (endian == DEVICE_BIG_ENDIAN) {
2541 val = bswap16(val);
2542 }
2543#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002544 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002545 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002546 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002547 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002548 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002549 switch (endian) {
2550 case DEVICE_LITTLE_ENDIAN:
2551 stw_le_p(ptr, val);
2552 break;
2553 case DEVICE_BIG_ENDIAN:
2554 stw_be_p(ptr, val);
2555 break;
2556 default:
2557 stw_p(ptr, val);
2558 break;
2559 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002560 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002561 }
bellardaab33092005-10-30 20:48:42 +00002562}
2563
Avi Kivitya8170e52012-10-23 12:30:10 +02002564void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002565{
2566 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2567}
2568
Avi Kivitya8170e52012-10-23 12:30:10 +02002569void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002570{
2571 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2572}
2573
Avi Kivitya8170e52012-10-23 12:30:10 +02002574void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002575{
2576 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2577}
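/*
 * Example (a sketch; the names are hypothetical): the 16-bit store is the
 * counterpart of lduw_phys() above, e.g. publishing a legacy virtio
 * used-ring index in guest-native order:
 *
 *     stw_phys(vring_used_idx_addr, used_idx);
 */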
2578
bellardaab33092005-10-30 20:48:42 +00002579/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002580void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002581{
2582 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002583 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002584}
2585
Avi Kivitya8170e52012-10-23 12:30:10 +02002586void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002587{
2588 val = cpu_to_le64(val);
2589 cpu_physical_memory_write(addr, &val, 8);
2590}
2591
Avi Kivitya8170e52012-10-23 12:30:10 +02002592void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002593{
2594 val = cpu_to_be64(val);
2595 cpu_physical_memory_write(addr, &val, 8);
2596}
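/*
 * Note the symmetry: stq_phys() swaps with tswap64() (host to
 * target-native order), while the _le/_be variants use
 * cpu_to_le64()/cpu_to_be64(); all three then go through the generic
 * write path. Example (a sketch; desc_addr is hypothetical) for a field
 * defined as little-endian:
 *
 *     stq_le_phys(desc_addr, buffer_gpa);
 */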
2597
aliguori5e2972f2009-03-28 17:51:36 +00002598/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002599int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002600 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002601{
2602 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002603 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002604 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002605
2606 while (len > 0) {
2607 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002608 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002609 /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
2612 l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
aliguori5e2972f2009-03-28 17:51:36 +00002615 phys_addr += (addr & ~TARGET_PAGE_MASK);
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
bellard13eb76e2004-01-24 15:23:36 +00002620 len -= l;
2621 buf += l;
2622 addr += l;
2623 }
2624 return 0;
2625}
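/*
 * Example (a sketch): this is the routine the gdbstub uses to read and
 * write guest memory through the CPU's current page tables, e.g.:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         return -1;    (no physical page mapped at vaddr)
 *     }
 *
 * Writes take the cpu_physical_memory_write_rom() path, so a debugger
 * can plant breakpoints even in ROM.
 */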
Paul Brooka68fe892010-03-01 00:08:59 +00002626#endif
bellard13eb76e2004-01-24 15:23:36 +00002627
Blue Swirl8e4a4242013-01-06 18:30:17 +00002628#if !defined(CONFIG_USER_ONLY)
2629
/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big-endian machine. Don't do this at home, kids!
 */
2634bool virtio_is_big_endian(void);
2635bool virtio_is_big_endian(void)
2636{
2637#if defined(TARGET_WORDS_BIGENDIAN)
2638 return true;
2639#else
2640 return false;
2641#endif
2642}
2643
2644#endif
2645
Wen Congyang76f35532012-05-07 12:04:18 +08002646#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002647bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002648{
    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002650 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002651
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002652 mr = address_space_translate(&address_space_memory,
2653 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002654
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002655 return !(memory_region_is_ram(mr) ||
2656 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002657}
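/*
 * Example (a sketch): a guest-memory dumper can use this predicate to
 * skip device (MMIO) ranges, which cannot be meaningfully read as RAM:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, len);
 *     }
 */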
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002658
2659void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2660{
2661 RAMBlock *block;
2662
2663 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2664 func(block->host, block->offset, block->length, opaque);
2665 }
2666}
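/*
 * Example (a sketch): a RAMBlockIterFunc that tallies guest RAM, e.g. for
 * a migration back end that must register every block up front:
 *
 *     static void add_block_length(void *host, ram_addr_t offset,
 *                                  ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(add_block_length, &total);
 */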
Peter Maydellec3f8c92013-06-27 20:53:38 +01002667#endif