/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
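
/* Populate one level of the physical-page radix tree, allocating interior
 * nodes on demand.  Runs of pages that are step-aligned and at least one
 * full step wide become leaves pointing at `leaf`; anything smaller
 * recurses one level further down. */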
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
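
/* Walk the tree from `lp` down to the leaf covering `index`, consuming
 * L2_BITS of the page index per level.  A NIL pointer on the way down
 * means the range was never mapped, so the unassigned section is
 * returned instead. */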
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
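
/* Full translation for a read or write access: repeatedly resolve the
 * address through each IOMMU on the path, shrinking the allowed length
 * to the IOMMU page and checking the per-entry permission bits, until a
 * terminal (non-IOMMU) MemoryRegion is reached. */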
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu = first_cpu;

    while (cpu) {
        if (cpu->cpu_index == index) {
            break;
        }
        cpu = cpu->next_cpu;
    }

    return cpu;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUState *cpu;

    cpu = first_cpu;
    while (cpu) {
        func(cpu, data);
        cpu = cpu->next_cpu;
    }
}
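
/* Register a freshly created vCPU: append it to the first_cpu list, hand
 * out the next free cpu_index, and wire up its vmstate/savevm state so
 * it participates in migration and snapshots. */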
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState **pcpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->next_cpu = NULL;
    pcpu = &first_cpu;
    cpu_index = 0;
    while (*pcpu != NULL) {
        pcpu = &(*pcpu)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *pcpu = cpu;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    CPUArchState *env = cpu->env_ptr;

    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
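
/* Append `section` to the next_map section table, growing the array as
 * needed, and take a reference on the underlying MemoryRegion so it
 * stays alive for the lifetime of this map. */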
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
                  & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
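
/* MemoryListener add callback: carve the incoming section into an
 * unaligned head, a page-aligned middle and an unaligned tail.  The
 * partial head and tail go through the subpage machinery; the aligned
 * middle is installed in bulk with register_multipage(). */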
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
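
/* Back a RAMBlock with a file on a hugetlbfs mount: create and unlink a
 * temporary file under `path`, round the size up to a hugepage multiple,
 * and mmap it (MAP_POPULATE | MAP_SHARED when -mem-prealloc is in
 * effect, MAP_PRIVATE otherwise). */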
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
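
/* Pick an offset for a new RAMBlock in ram_addr_t space.  This is a
 * best-fit search: for each existing block, measure the gap up to the
 * next block above it and keep the smallest gap that still fits `size`. */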
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
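
/* Core RAM allocation path.  The backing memory comes from `host` if the
 * caller supplied it, from a hugetlbfs file when -mem-path is set, or
 * from Xen/KVM/anonymous mmap otherwise; the new block is then inserted
 * into ram_list (sorted biggest-first) and its dirty bitmap grown and
 * initialised. */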
Avi Kivityc5705a72011-12-20 15:59:12 +02001102ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1103 MemoryRegion *mr)
1104{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001105 RAMBlock *block, *new_block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001106
1107 size = TARGET_PAGE_ALIGN(size);
1108 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06001109
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001110 /* This assumes the iothread lock is taken here too. */
1111 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001112 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001113 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001114 if (host) {
1115 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001116 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001117 } else {
1118 if (mem_path) {
1119#if defined (__linux__) && !defined(TARGET_S390X)
1120 new_block->host = file_ram_alloc(new_block, size, mem_path);
1121 if (!new_block->host) {
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001122 new_block->host = qemu_anon_ram_alloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001123 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001124 }
1125#else
1126 fprintf(stderr, "-mem-path option unsupported\n");
1127 exit(1);
1128#endif
1129 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02001130 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02001131 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00001132 } else if (kvm_enabled()) {
1133 /* some s390/kvm configurations have special constraints */
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001134 new_block->host = kvm_ram_alloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01001135 } else {
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001136 new_block->host = qemu_anon_ram_alloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01001137 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001138 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001139 }
1140 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001141 new_block->length = size;
1142
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001143 /* Keep the list sorted from biggest to smallest block. */
1144 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1145 if (block->length < new_block->length) {
1146 break;
1147 }
1148 }
1149 if (block) {
1150 QTAILQ_INSERT_BEFORE(block, new_block, next);
1151 } else {
1152 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1153 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001154 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001155
Umesh Deshpandef798b072011-08-18 11:41:17 -07001156 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001157 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001158
Anthony Liguori7267c092011-08-20 22:09:37 -05001159 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06001160 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04001161 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1162 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02001163 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001164
Jason Baronddb97f12012-08-02 15:44:16 -04001165 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001166 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Jason Baronddb97f12012-08-02 15:44:16 -04001167
Cam Macdonell84b89d72010-07-26 18:10:57 -06001168 if (kvm_enabled())
1169 kvm_setup_guest_memory(new_block->host, size);
1170
1171 return new_block->offset;
1172}
1173
Avi Kivityc5705a72011-12-20 15:59:12 +02001174ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001175{
Avi Kivityc5705a72011-12-20 15:59:12 +02001176 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001177}
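
/* Usage sketch (illustrative, not part of this file): device and board
 * code normally reaches qemu_ram_alloc() through the MemoryRegion API
 * rather than calling it directly, e.g.
 *
 *     memory_region_init_ram(mr, owner, "mydev.ram", size);
 *
 * which allocates host memory for the region and records the returned
 * ram_addr_t offset.  "mr", "owner", "mydev.ram" and "size" are
 * placeholder names.
 */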

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
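
/* Illustrative use of the rule described above (a sketch; the device
 * state and field names are hypothetical): a display device that owns
 * its VRAM block may cache a host pointer for local access:
 *
 *     uint8_t *vram = qemu_get_ram_ptr(s->vram_offset);
 *     memcpy(dst, vram + src_off, line_bytes);
 *
 * where s->vram_offset is the offset returned by qemu_ram_alloc().
 */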

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
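
/* Note: the notdirty region above backs writes to RAM pages whose
 * CODE_DIRTY_FLAG is clear.  Routing the write through
 * notdirty_mem_write() lets any translated code overlapping the page be
 * invalidated before the store is performed; the page is then marked
 * dirty so later writes can go straight to RAM.
 */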

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
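
/* Worked example (hypothetical layout): if a 4 KiB target page at
 * 0x1000 holds a device at offsets [0x000, 0x3ff] and RAM at
 * [0x400, 0xfff], the dispatch code would set it up roughly as:
 *
 *     subpage_t *sp = subpage_init(as, 0x1000);
 *     subpage_register(sp, 0x000, 0x3ff, dev_section);
 *     subpage_register(sp, 0x400, 0xfff, ram_section);
 *
 * where dev_section and ram_section stand for indices returned by
 * phys_section_add().  Accesses to the page then go through subpage_ops,
 * which re-dispatches each one through the address space so it reaches
 * the region covering that offset.
 */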

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}

static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    prev_map = g_new(PhysPageMap, 1);
    *prev_map = next_map;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
 * All AddressSpaceDispatch instances have switched to the next map.
 */
static void core_commit(MemoryListener *listener)
{
    phys_sections_free(prev_map);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        CPUArchState *env = cpu->env_ptr;

        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&as->dispatch_listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, NULL, "io", 65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }

    return l;
}
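
/* Worked example: for an MMIO region that leaves valid.max_access_size
 * at 0 (treated as 4) and does not set impl.unaligned, a request of
 * l = 8 at addr = 0x1002 is first capped to 4 and then to the address
 * alignment (0x1002 & -0x1002 == 2), so the caller's loop ends up
 * splitting the access into 2-byte chunks.
 */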

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    error |= io_mem_write(mr, addr1, val, 8);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 8);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
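
/* Usage sketch: reading a 32-bit little-endian value from a guest
 * physical address through the buffer-based slow path, which works for
 * both RAM and MMIO ("gpa" is a caller-supplied address):
 *
 *     uint8_t buf[4];
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);
 *     uint32_t v = ldl_le_p(buf);
 */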

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
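
/* Usage sketch of the map/unmap pair (names are placeholders): a
 * DMA-capable device maps a guest buffer, accesses it directly, then
 * unmaps it:
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, gpa, &plen, true);
 *     if (p) {
 *         fill_buffer(p, plen);        // direct host access
 *         address_space_unmap(as, p, plen, true, plen);
 *     } else {
 *         // resources exhausted; retry via cpu_register_map_client()
 *     }
 *
 * *plen may come back smaller than requested, so real callers loop
 * until the full range has been processed.
 */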
bellardd0ecd2a2006-04-23 17:14:48 +00002238
Avi Kivitya8170e52012-10-23 12:30:10 +02002239void *cpu_physical_memory_map(hwaddr addr,
2240 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002241 int is_write)
2242{
2243 return address_space_map(&address_space_memory, addr, plen, is_write);
2244}
2245
Avi Kivitya8170e52012-10-23 12:30:10 +02002246void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2247 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002248{
2249 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2250}
2251
bellard8df1cd02005-01-28 22:37:22 +00002252/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002253static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002254 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002255{
bellard8df1cd02005-01-28 22:37:22 +00002256 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002257 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002258 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002259 hwaddr l = 4;
2260 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002261
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002262 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2263 false);
2264 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002265 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002266 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002267#if defined(TARGET_WORDS_BIGENDIAN)
2268 if (endian == DEVICE_LITTLE_ENDIAN) {
2269 val = bswap32(val);
2270 }
2271#else
2272 if (endian == DEVICE_BIG_ENDIAN) {
2273 val = bswap32(val);
2274 }
2275#endif
bellard8df1cd02005-01-28 22:37:22 +00002276 } else {
2277 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002278 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002279 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002280 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002281 switch (endian) {
2282 case DEVICE_LITTLE_ENDIAN:
2283 val = ldl_le_p(ptr);
2284 break;
2285 case DEVICE_BIG_ENDIAN:
2286 val = ldl_be_p(ptr);
2287 break;
2288 default:
2289 val = ldl_p(ptr);
2290 break;
2291 }
bellard8df1cd02005-01-28 22:37:22 +00002292 }
2293 return val;
2294}
2295
Avi Kivitya8170e52012-10-23 12:30:10 +02002296uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002297{
2298 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2299}
2300
Avi Kivitya8170e52012-10-23 12:30:10 +02002301uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002302{
2303 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2304}
2305
Avi Kivitya8170e52012-10-23 12:30:10 +02002306uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002307{
2308 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2309}
2310
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

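/*
 * Illustrative sketch (not part of the original file): the 64-bit loads
 * follow the same pattern at 8-byte width, e.g. fetching the next-pointer
 * of a hypothetical DMA descriptor that the guest stores big-endian
 * ("desc_pa" is a hypothetical address):
 *
 *     uint64_t next = ldq_be_phys(desc_pa + 8);
 */
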
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

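/*
 * Note (added comment): a single byte has no byte order, so ldub_phys
 * needs no _le/_be variants; it simply goes through
 * cpu_physical_memory_read().  Sketch, with a hypothetical address:
 *
 *     uint32_t flags = ldub_phys(status_pa);      // status_pa hypothetical
 */
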
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

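/*
 * Illustrative sketch (not part of the original file): the 16-bit variants
 * suit guest structures with packed 16-bit fields, e.g. a hypothetical
 * ring index that the guest keeps little-endian ("avail_pa" is a
 * hypothetical address):
 *
 *     uint16_t avail_idx = lduw_le_phys(avail_pa + 2);
 */
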
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

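/*
 * Illustrative sketch (not part of the original file): the _notdirty store
 * is meant for cases like a software page-table walker setting
 * accessed/dirty bits in a guest PTE.  Using stl_phys() there would mark
 * the page dirty and defeat PTE write tracking.  "pte_pa" and
 * PG_ACCESSED_MASK are hypothetical here (the latter borrowed from x86):
 *
 *     uint32_t pte = ldl_phys(pte_pa);
 *     stl_phys_notdirty(pte_pa, pte | PG_ACCESSED_MASK);
 */
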
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

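/*
 * Illustrative sketch (not part of the original file): mirroring the
 * loads, a device model writing a 32-bit completion status in an explicit
 * byte order ("status_pa" is a hypothetical address):
 *
 *     stl_le_phys(status_pa, 0x1);    // visible to the guest as LE
 */
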
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

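/*
 * Note (added comment): as with ldub_phys above, a byte store needs no
 * endian variants.  Sketch: stb_phys(doorbell_pa, 1) rings a hypothetical
 * one-byte doorbell register.
 */
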
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

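/*
 * Note (added comment) on the three 64-bit stores above: stq_phys() uses
 * tswap64(), i.e. the *target's* native byte order, while the _le/_be
 * variants pin the order explicitly with cpu_to_le64()/cpu_to_be64()
 * regardless of target.  Sketch ("ts_pa" is a hypothetical address):
 *
 *     stq_phys(ts_pa, ns);       // stored in guest byte order
 *     stq_le_phys(ts_pa, ns);    // always stored little-endian
 */
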
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
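/*
 * Illustrative sketch (not part of the original file): this routine walks
 * guest *virtual* memory page by page through cpu_get_phys_page_debug(),
 * and writes go via cpu_physical_memory_write_rom() so a debugger can
 * plant breakpoints even in ROM.  A debugger-style caller might do:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         // pc unmapped; report an error to the debugger
 *     }
 */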
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

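/*
 * Illustrative sketch (not part of the original file): a guest-memory
 * dumper might use this predicate to skip MMIO windows, where reading can
 * have side effects ("pa" and "buf" are hypothetical):
 *
 *     if (!cpu_physical_memory_is_io(pa)) {
 *         cpu_physical_memory_read(pa, buf, TARGET_PAGE_SIZE);
 *     }
 */
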
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
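/*
 * Illustrative sketch (not part of the original file): a caller, such as a
 * migration transport, can walk every RAM block by supplying a
 * RAMBlockIterFunc callback.  "count_block" and "total" are hypothetical:
 *
 *     static void count_block(void *host, ram_addr_t offset,
 *                             ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_block, &total);
 */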
#endif