/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "qemu/cache-utils.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};
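
/*
 * Note added for exposition (not in the original sources): phys_map is
 * the root of a radix tree over physical page numbers.  Interior
 * entries (is_leaf == 0) index into 'nodes'; leaf entries index into
 * 'sections'.  Each level of a walk consumes L2_BITS of the page
 * index; see phys_page_find() below for the actual traversal.
 */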

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2, 16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
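
/*
 * Worked example, added for exposition: if L2_BITS were 10 (so
 * L2_SIZE == 1024) and the map had two levels, looking up page index
 * 0x12345 would read nodes[lp.ptr][(0x12345 >> 10) & 0x3ff] and then
 * slot (0x12345 & 0x3ff) of the child that entry points to.  Hitting
 * PHYS_MAP_NODE_NIL at any level resolves to the
 * PHYS_SECTION_UNASSIGNED section.
 */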

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
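
/*
 * Usage sketch, added for exposition; it mirrors what callers such as
 * address_space_rw() do further down in this file.  Translation is done
 * fragment by fragment because one access may span several regions:
 *
 *     while (len > 0) {
 *         hwaddr xlat, l = len;
 *         MemoryRegion *mr = address_space_translate(as, addr, &xlat,
 *                                                    &l, is_write);
 *         // access at most 'l' bytes of 'mr' starting at offset 'xlat'
 *         addr += l;
 *         len -= l;
 *     }
 */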

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
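
/*
 * Usage sketch, added for exposition: this is how the gdbstub typically
 * drives the API.  'len' must be a power of two no larger than
 * TARGET_PAGE_SIZE and 'addr' must be aligned to it:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4,
 *                               BP_MEM_WRITE | BP_GDB, &wp) < 0) {
 *         // report the failure back to the debugger
 *     }
 */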

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
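
/*
 * Usage sketch, added for exposition: a debugger front end pairs the
 * insert with a matching remove; BP_GDB keeps debugger-owned
 * breakpoints ahead of guest-owned (BP_CPU) ones in the list:
 *
 *     cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(env, pc, BP_GDB);
 */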

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
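
/*
 * Note added for exposition: the returned value is overloaded.  For RAM
 * it is the page-aligned ram_addr_t with one of the low PHYS_SECTION_*
 * indices ORed in, so writes can be routed through the notdirty or ROM
 * handlers; for MMIO it is the section's index in the dispatch table
 * plus the offset of the access.  The softmmu TLB code is expected to
 * separate the two encodings again when the entry is used.
 */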
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}
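
/*
 * Usage sketch (hypothetical accelerator code; my_legacy_alloc is made
 * up for illustration):
 *
 *     static void *my_legacy_alloc(size_t size)
 *     {
 *         // return suitably aligned memory the kernel interface accepts
 *     }
 *
 *     phys_mem_set_alloc(my_legacy_alloc);
 *
 * Afterwards qemu_ram_alloc_from_ptr() uses the hook instead of
 * qemu_anon_ram_alloc() for anonymous guest RAM.
 */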

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
            - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
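
/*
 * Worked example, added for exposition: with 4KiB target pages,
 * registering a section covering 0x1800..0x67ff is split by mem_add()
 * into a subpage head (0x1800..0x1fff), four full pages handled by
 * register_multipage() (0x2000..0x5fff), and a subpage tail
 * (0x6000..0x67ff).
 */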

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory / hpagesize); i++) {
            memset(area + (hpagesize * i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
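
/*
 * Note added for exposition: this is a best-fit search over the gaps
 * between existing blocks.  For example, with blocks at [0, 0x1000)
 * and [0x3000, 0x4000), a request for 0x1000 bytes returns 0x1000,
 * reusing the smallest gap that still fits instead of appending after
 * the last block.
 */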
1064
Juan Quintela652d7ec2012-07-20 10:37:54 +02001065ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001066{
Alex Williamsond17b5282010-06-25 11:08:38 -06001067 RAMBlock *block;
1068 ram_addr_t last = 0;
1069
Paolo Bonzinia3161032012-11-14 15:54:48 +01001070 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001071 last = MAX(last, block->offset + block->length);
1072
1073 return last;
1074}
1075
Jason Baronddb97f12012-08-02 15:44:16 -04001076static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1077{
1078 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001079
1080 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001081 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1082 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001083 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1084 if (ret) {
1085 perror("qemu_madvise");
1086 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1087 "but dump_guest_core=off specified\n");
1088 }
1089 }
1090}
1091
Avi Kivityc5705a72011-12-20 15:59:12 +02001092void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001093{
1094 RAMBlock *new_block, *block;
1095
Avi Kivityc5705a72011-12-20 15:59:12 +02001096 new_block = NULL;
Paolo Bonzinia3161032012-11-14 15:54:48 +01001097 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001098 if (block->offset == addr) {
1099 new_block = block;
1100 break;
1101 }
1102 }
1103 assert(new_block);
1104 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001105
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001106 if (dev) {
1107 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001108 if (id) {
1109 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001110 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001111 }
1112 }
1113 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1114
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001115 /* This assumes the iothread lock is taken here too. */
1116 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001117 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001118 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001119 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1120 new_block->idstr);
1121 abort();
1122 }
1123 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001124 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001125}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
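
/*
 * Usage sketch (illustrative): callers normally go through the memory
 * API rather than calling this directly.  memory_region_init_ram()
 * boils down to
 *
 *     mr->ram_addr = qemu_ram_alloc(size, mr);
 *
 * while memory_region_init_ram_ptr() passes a caller-provided buffer:
 *
 *     mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
 *
 * The returned ram_addr_t is an offset into the global RAM space, not
 * a guest physical address.
 */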

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
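
/*
 * Sketch of typical use (illustrative only): a display device that
 * owns its VRAM block can render straight from the host mapping, e.g.
 *
 *     uint8_t *vram = qemu_get_ram_ptr(s->vram_offset);
 *     memcpy(surface_data(surface), vram, s->vram_size);
 *
 * where s->vram_offset is the value returned by qemu_ram_alloc() and
 * the device is known never to read past the end of its own block.
 */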

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
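
/*
 * Round-trip sketch (illustrative): outside Xen, the two directions
 * compose, so for any mapped RAM offset
 *
 *     void *p = qemu_get_ram_ptr(off);
 *     ram_addr_t back;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(p, &back);
 *     assert(mr != NULL && back == off);
 *
 * The mru_block check above is just a one-entry cache for this lookup.
 */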

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
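
/*
 * Mechanism sketch (illustrative, not code from this file): a RAM page
 * whose dirty flags are all set (0xff) is written through the fast TLB
 * path.  Once a flag is cleared, e.g. by
 *
 *     cpu_physical_memory_reset_dirty(start, end, MIGRATION_DIRTY_FLAG);
 *
 * the page's TLB entries are marked TLB_NOTDIRTY and the next guest
 * store is routed through notdirty_mem_write() above, which re-sets
 * every flag except CODE_DIRTY_FLAG and, once the flags reach 0xff
 * again, restores the fast path via tlb_set_dirty().
 */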

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
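
/*
 * Why subpages exist (a sketch): the dispatch tree works at
 * TARGET_PAGE_SIZE granularity, so when two MemoryRegionSections share
 * one guest page -- say RAM up to offset 0x800 and MMIO above it --
 * that page is covered by a single subpage_t whose sub_section[] array
 * selects the right section per byte offset, and subpage_read/write
 * simply bounce the access back through address_space_rw() at the
 * translated address.
 */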

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}

static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch
 * listeners'.  All AddressSpaceDispatch instances have switched to the
 * next map by then.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    if (tcg_enabled()) {
        memory_listener_register(&tcg_memory_listener, &address_space_memory);
    }
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
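
/*
 * Worked example (illustrative): a 7-byte access at addr 0x1006 to a
 * region with max_access_size 4 and no unaligned support:
 *   - access_size_max starts at 4;
 *   - addr & -addr == 2, so the address alignment caps it at 2;
 *   - l = MIN(7, 2) = 2, already a power of two, so 2 is returned and
 *     address_space_rw() loops again for the remaining bytes.
 */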

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
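
/*
 * Usage sketch (illustrative): a DMA-capable device model pushing a
 * descriptor into guest memory would do something like
 *
 *     MyDesc d = { ... };
 *     bool err = address_space_rw(&address_space_memory, desc_gpa,
 *                                 (uint8_t *)&d, sizeof(d), true);
 *
 * treating err == true as the transaction having touched an unassigned
 * or invalid region.  MyDesc and desc_gpa are made-up names.
 */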

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
}
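
/*
 * Usage sketch (illustrative): firmware loaders take this path because
 * a plain write would be refused by read-only regions, e.g.
 *
 *     cpu_physical_memory_write_rom(bios_gpa, blob, blob_size);
 *     cpu_flush_icache_range(bios_gpa, blob_size);
 *
 * The second call matters on hosts with incoherent instruction caches
 * under KVM; bios_gpa and blob are made-up names.
 */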

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

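/*
 * Usage sketch (illustrative): a caller whose address_space_map()
 * failed because the single bounce buffer was busy can queue a retry:
 *
 *     static void my_retry(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ... reissue the address_space_map() call here ...
 *     }
 *
 *     cpu_register_map_client(s, my_retry);
 *
 * cpu_notify_map_clients() runs the callback once the bounce buffer is
 * released in address_space_unmap(); my_retry and MyState are made up.
 */
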
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
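
/*
 * Pairing sketch (illustrative): the canonical zero-copy DMA pattern is
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, is_write);
 *     if (p) {
 *         ... touch at most plen bytes ...
 *         address_space_unmap(as, p, plen, is_write, bytes_touched);
 *     }
 *
 * dma_memory_map()/dma_memory_unmap() wrap exactly this.  Note *plen
 * may come back smaller than requested, and p may be the bounce buffer.
 */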

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
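
/*
 * Endianness sketch (illustrative): for a guest RAM word holding the
 * bytes 78 56 34 12 (lowest address first),
 *
 *     ldl_le_phys(addr) == 0x12345678   on any host
 *     ldl_be_phys(addr) == 0x78563412   on any host
 *
 * while ldl_phys() uses the target's native order, so the bswap above
 * is only needed when device and target endianness disagree.
 */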

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
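
/*
 * Usage sketch (illustrative; "ring_pa" is a made-up guest-physical
 * address): 64-bit fields such as DMA base pointers are read the same
 * way; per the warning above, addr must be naturally aligned for the
 * 8-byte access:
 *
 *     uint64_t base = ldq_le_phys(ring_pa);
 */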

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
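
/*
 * Usage sketch (illustrative; "avail_pa" is a made-up address):
 * 16-bit fields go through the lduw_* helpers, and the result is
 * zero-extended into the uint32_t return value:
 *
 *     uint16_t idx = lduw_le_phys(avail_pa + 2);
 */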

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
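
/*
 * Usage sketch (illustrative; "pte_pa" and PTE_ACCESSED are made up):
 * a target MMU helper that sets accessed/dirty bits in a guest PTE
 * wants exactly these semantics, because the dirty bits are being
 * used to track the PTE modifications themselves:
 *
 *     uint32_t pte = ldl_phys(pte_pa);
 *     stl_phys_notdirty(pte_pa, pte | PTE_ACCESSED);
 */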

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
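
/*
 * Usage sketch (illustrative; "status_pa" is a made-up address):
 * posting a 32-bit completion status into a little-endian structure
 * in guest memory:
 *
 *     stl_le_phys(status_pa, 1);
 *
 * Unlike stl_phys_notdirty(), this marks the page dirty and
 * invalidates any translated code overlapping the stored word.
 */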

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
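
/*
 * Usage sketch (illustrative; "idx_pa" is a made-up address): 16-bit
 * stores follow the same pattern, e.g. publishing an index word kept
 * little-endian in guest memory:
 *
 *     stw_le_phys(idx_pa, idx);
 */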

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
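
/*
 * Note: unlike the 16/32-bit helpers above, the 64-bit stores just
 * byte-swap into a local and defer to cpu_physical_memory_write()
 * (hence the XXX: optimize).  Usage mirrors the loads, e.g.
 * (illustrative; "fdt_pa" is a made-up address):
 *
 *     stq_be_phys(fdt_pa, magic);
 */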

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
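
/*
 * Usage sketch (illustrative): the gdbstub goes through this helper
 * to read guest memory by virtual address, e.g. fetching the bytes
 * under the current PC:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         return -1;
 *     }
 */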
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to
 * find out if it's running on a big endian machine. Don't do this at
 * home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
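
/*
 * Usage sketch (illustrative): guest-memory dump code can use this to
 * skip physical addresses backed by device I/O rather than RAM/ROM:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;
 *     }
 */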

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
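
/*
 * Usage sketch (illustrative; "count_block" is a made-up callback
 * matching the RAMBlockIterFunc signature used above):
 *
 *     static void count_block(void *host, ram_addr_t offset,
 *                             ram_addr_t length, void *opaque)
 *     {
 *         *(ram_addr_t *)opaque += length;
 *     }
 *
 *     ram_addr_t total = 0;
 *     qemu_ram_foreach_block(count_block, &total);
 */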
#endif