/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

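/* The dispatch structures below implement a multi-level (radix) map over
 * the guest physical address space.  A PhysPageEntry packs a leaf flag and
 * a 15-bit index into 16 bits: interior entries index the node array, leaf
 * entries index the section array.  The 15-bit width of ptr is why
 * PHYS_MAP_NODE_NIL below is (((uint16_t)~0) >> 1), i.e. 0x7fff.
 */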
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef PhysPageEntry Node[L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

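/* Lookups are served from cur_map, while memory-layout updates are staged
 * in next_map (see phys_map_node_alloc() and phys_section_add() below).
 * The staged map presumably replaces cur_map once a layout update commits;
 * the commit path itself is outside this excerpt.
 */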
static PhysPageMap cur_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2, 16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

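/* Point every page in [*index, *index + *nb) at section `leaf`, allocating
 * interior nodes on demand.  A step-aligned, step-sized run is recorded as
 * a single entry at the current level; anything smaller recurses one level
 * down.  For example, with L2_BITS == 10 a 1024-page aligned run becomes
 * one level-1 leaf, while a 3-page run starting at page 1 is stored as
 * three level-0 leaves.
 */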
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

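/* Resolve a page index to its MemoryRegionSection: at most P_L2_LEVELS
 * node hops, falling back to the unassigned section wherever the tree was
 * never populated.
 */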
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch->phys_map, addr >> TARGET_PAGE_BITS,
                             cur_map.nodes, cur_map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &cur_map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

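/* Translate addr within as, following any IOMMUs that sit in the path, and
 * return the terminal MemoryRegion together with the offset (*xlat) and
 * clamped length (*plen) inside it.  A permission check failure along the
 * way yields io_mem_unassigned.
 */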
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

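/* Apply func to every CPU on the global list.  A minimal usage sketch
 * (illustrative only, not part of this file):
 *
 *     static void count_one(CPUState *cpu, void *data)
 *     {
 *         (*(int *)data)++;
 *     }
 *
 *     int ncpus = 0;
 *     qemu_for_each_cpu(count_one, &ncpus);
 */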
void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
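
/* Illustrative example (not part of this file): a GDB-style 4-byte write
 * watchpoint at vaddr would be installed with
 *
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_GDB | BP_MEM_WRITE, NULL);
 *
 * assuming the BP_MEM_WRITE flag from cpu-all.h.  len must be a power of
 * two no larger than TARGET_PAGE_SIZE and addr must be len-aligned, or
 * -EINVAL is returned.
 */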

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - cur_map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2, 16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_clear(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

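/* Split an incoming section into an unaligned head, a page-aligned middle
 * and an unaligned tail.  E.g. a section spanning [page0+0x80, page3+0x40)
 * is registered as a head subpage, two full pages via register_multipage(),
 * and a tail subpage.
 */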
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

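/* Return the block size of the filesystem backing path; for hugetlbfs,
 * f_bsize is the huge page size (typically 2 MiB on x86-64 hosts).  A
 * warning is printed when the path is not on hugetlbfs at all.
 */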
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

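/* Best-fit allocator for ram_addr_t space: for each existing block, compute
 * the gap to the nearest following block and remember the smallest gap that
 * still fits.  E.g. with blocks at [0, 4M) and [16M, 32M), a 4M request is
 * placed at offset 4M (the 12M gap) rather than after 32M.
 */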
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

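/* Ask the host to merge identical pages (KSM via MADV_MERGEABLE on Linux)
 * unless the user disabled it with -machine mem-merge=off.
 */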
static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

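/* Allocate (or, when host != NULL, adopt) a RAM block and register it in
 * ram_list.  Backing store is chosen in order: the caller-provided pointer,
 * a -mem-path hugepage file, a Xen- or KVM-specific allocator, and finally
 * anonymous memory.
 */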
Avi Kivityc5705a72011-12-20 15:59:12 +02001107ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1108 MemoryRegion *mr)
1109{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001110 RAMBlock *block, *new_block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001111
1112 size = TARGET_PAGE_ALIGN(size);
1113 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06001114
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001115 /* This assumes the iothread lock is taken here too. */
1116 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001117 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001118 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001119 if (host) {
1120 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001121 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001122 } else {
1123 if (mem_path) {
1124#if defined (__linux__) && !defined(TARGET_S390X)
1125 new_block->host = file_ram_alloc(new_block, size, mem_path);
1126 if (!new_block->host) {
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001127 new_block->host = qemu_anon_ram_alloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001128 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001129 }
1130#else
1131 fprintf(stderr, "-mem-path option unsupported\n");
1132 exit(1);
1133#endif
1134 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02001135 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02001136 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00001137 } else if (kvm_enabled()) {
1138 /* some s390/kvm configurations have special constraints */
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001139 new_block->host = kvm_ram_alloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01001140 } else {
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001141 new_block->host = qemu_anon_ram_alloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01001142 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001143 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001144 }
1145 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001146 new_block->length = size;
1147
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001148 /* Keep the list sorted from biggest to smallest block. */
1149 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1150 if (block->length < new_block->length) {
1151 break;
1152 }
1153 }
1154 if (block) {
1155 QTAILQ_INSERT_BEFORE(block, new_block, next);
1156 } else {
1157 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1158 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001159 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001160
Umesh Deshpandef798b072011-08-18 11:41:17 -07001161 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001162 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001163
Anthony Liguori7267c092011-08-20 22:09:37 -05001164 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06001165 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04001166 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1167 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02001168 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001169
Jason Baronddb97f12012-08-02 15:44:16 -04001170 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001171 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Jason Baronddb97f12012-08-02 15:44:16 -04001172
Cam Macdonell84b89d72010-07-26 18:10:57 -06001173 if (kvm_enabled())
1174 kvm_setup_guest_memory(new_block->host, size);
1175
1176 return new_block->offset;
1177}
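
/*
 * Illustrative sketch (not in the original file, compiled out): a device
 * that already owns a host buffer (e.g. shared memory mapped from another
 * process) can expose it as guest RAM through the _from_ptr variant above;
 * plain qemu_ram_alloc() below covers the common case.  The buffer origin
 * and size are invented for the example.
 */
#if 0
static ram_addr_t example_expose_shared_buffer(void *host_buf,
                                               MemoryRegion *mr)
{
    /* host_buf must remain valid for the lifetime of the block */
    return qemu_ram_alloc_from_ptr(16 * 1024 * 1024, host_buf, mr);
}
#endif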

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
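
/*
 * Illustrative sketch (not in the original file, compiled out): per the
 * comment above, qemu_get_ram_ptr() is meant for block-local accesses such
 * as a display device touching its own video RAM.  "vram_base" stands for
 * the ram_addr_t returned by qemu_ram_alloc() for that block; the function
 * and its parameters are invented for the example.
 */
#if 0
static void example_clear_scanline(ram_addr_t vram_base, int line, int pitch)
{
    uint8_t *p = qemu_get_ram_ptr(vram_base + line * pitch);

    memset(p, 0, pitch);    /* stays inside the block the device owns */
}
#endif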

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.  */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case occurs when the block is not mapped.  */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
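
/*
 * Illustrative sketch (not in the original file, compiled out): the reverse
 * translation above lets softmmu turn a host pointer, say one produced by
 * qemu_get_ram_ptr(), back into a ram offset plus the owning MemoryRegion.
 */
#if 0
static void example_round_trip(ram_addr_t offset)
{
    void *host = qemu_get_ram_ptr(offset);
    ram_addr_t back;
    MemoryRegion *mr = qemu_ram_addr_from_host(host, &back);

    assert(mr != NULL && back == offset);
}
#endif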

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction.  */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', size, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
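
/*
 * Illustrative sketch (not in the original file, compiled out): subpages
 * come into play when MemoryRegions smaller than a page share one guest
 * page.  Mapping two hypothetical 64-byte MMIO regions 0x80 bytes apart
 * makes the dispatch code route both through subpage_read()/subpage_write()
 * above.  The ops, names and addresses are invented for the example.
 */
#if 0
static uint64_t example_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;   /* dummy device register */
}

static void example_mmio_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
}

static const MemoryRegionOps example_mmio_ops = {
    .read = example_mmio_read,
    .write = example_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void example_map_two_small_regions(MemoryRegion *a, MemoryRegion *b)
{
    memory_region_init_io(a, NULL, &example_mmio_ops, NULL, "dev-a", 0x40);
    memory_region_init_io(b, NULL, &example_mmio_ops, NULL, "dev-b", 0x40);
    /* Both land in the page at 0x10000000 and neither fills it, so
     * rendering the memory map creates a subpage for that page. */
    memory_region_add_subregion(get_system_memory(), 0x10000000, a);
    memory_region_add_subregion(get_system_memory(), 0x10000080, b);
}
#endif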

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return cur_map.sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL,
                          "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch,
                                           listener);

    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    uint16_t n;

    memset(&next_map, 0, sizeof(next_map));
    n = dummy_section(&io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);
}

/* This listener's commit runs after the other AddressSpaceDispatch
 * listeners'.  All AddressSpaceDispatch instances have switched to
 * the next map.
 */
static void core_commit(MemoryListener *listener)
{
    PhysPageMap info = cur_map;
    cur_map = next_map;
    phys_sections_clear(&info);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    d->as = as;
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, NULL, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, NULL, "io", 65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
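
/*
 * Illustrative sketch (not in the original file, compiled out): board code
 * typically combines get_system_memory() with the RAM helpers above when
 * wiring up a machine.  The region name and size are invented, and the
 * memory_region_init_ram()/memory_region_add_subregion() calls are the
 * public memory API declared in exec/memory.h.
 */
#if 0
static void example_board_init_ram(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif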

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static inline int memory_access_size(MemoryRegion *mr, int l, hwaddr addr)
{
    if (l >= 4 && (((addr & 3) == 0 || mr->ops->impl.unaligned))) {
        return 4;
    }
    if (l >= 2 && (((addr & 1) == 0) || mr->ops->impl.unaligned)) {
        return 2;
    }
    return 1;
}

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    bool error = false;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
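
/*
 * Illustrative sketch (not in the original file, compiled out): reading a
 * guest physical range into a host buffer with the convenience wrapper
 * above.  The guest address is invented for the example.
 */
#if 0
static void example_read_guest_memory(void)
{
    uint8_t buf[64];

    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0 /* read */);
}
#endif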

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2016
aliguori6d16c2f2009-01-22 16:59:11 +00002017typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002018 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002019 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002020 hwaddr addr;
2021 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002022} BounceBuffer;
2023
2024static BounceBuffer bounce;
2025
aliguoriba223c22009-01-22 16:59:16 +00002026typedef struct MapClient {
2027 void *opaque;
2028 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002029 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002030} MapClient;
2031
Blue Swirl72cf2d42009-09-12 07:36:22 +00002032static QLIST_HEAD(map_client_list, MapClient) map_client_list
2033 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002034
2035void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2036{
Anthony Liguori7267c092011-08-20 22:09:37 -05002037 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002038
2039 client->opaque = opaque;
2040 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002041 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002042 return client;
2043}
2044
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002045static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002046{
2047 MapClient *client = (MapClient *)_client;
2048
Blue Swirl72cf2d42009-09-12 07:36:22 +00002049 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002050 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002051}
2052
2053static void cpu_notify_map_clients(void)
2054{
2055 MapClient *client;
2056
Blue Swirl72cf2d42009-09-12 07:36:22 +00002057 while (!QLIST_EMPTY(&map_client_list)) {
2058 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002059 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002060 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002061 }
2062}
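
/*
 * Illustrative sketch (not in the original file, compiled out): a device
 * whose address_space_map() call failed because the bounce buffer was busy
 * can register a map client to be called back when a retry is likely to
 * succeed.  example_device_kick() is a hypothetical helper invented for
 * the example.
 */
#if 0
static void example_retry_dma(void *opaque)
{
    /* called from cpu_notify_map_clients() once the bounce buffer frees
     * up; the registration is dropped right after the callback runs */
    example_device_kick(opaque);    /* hypothetical helper */
}

static void example_start_dma(void *dev, hwaddr addr, hwaddr len)
{
    void *p = address_space_map(&address_space_memory, addr, &len, true);

    if (!p) {
        cpu_register_map_client(dev, example_retry_dma);
        return;
    }
    /* ... fill p ..., then: */
    address_space_unmap(&address_space_memory, p, len, true, len);
}
#endif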

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
                                bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
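
/*
 * Illustrative sketch (not in the original file, compiled out): the usual
 * zero-copy pattern around the two helpers above.  If the range is
 * contiguous guest RAM the pointer aliases it directly; otherwise a bounce
 * buffer is handed out and flushed at unmap time.  The function and its
 * parameters are invented for the example.
 */
#if 0
static void example_dma_write(const uint8_t *data, hwaddr addr, hwaddr size)
{
    hwaddr plen = size;
    void *p = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!p) {
        return;                 /* bounce buffer busy: retry later */
    }
    memcpy(p, data, plen);      /* plen may come back smaller than size */
    cpu_physical_memory_unmap(p, plen, 1, plen);
}
#endif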

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
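
/*
 * Illustrative sketch (not in the original file, compiled out): the _le/_be
 * variants fix the byte order regardless of host and target endianness,
 * which is what device emulation usually wants.  The register address is
 * invented for the example.
 */
#if 0
static uint32_t example_read_le_register(void)
{
    /* a little-endian device register at a made-up guest physical address */
    return ldl_le_phys(0xfee00000);
}
#endif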

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
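
/*
 * Usage sketch (the register layout is hypothetical): a board model
 * reading a 16-bit big-endian ID register uses the _be_ accessor so
 * callers never open-code byte swaps.
 */
static uint32_t example_read_be_id_reg(hwaddr regs_base)
{
    return lduw_be_phys(regs_base + 0x06);   /* 0x06: made-up offset */
}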

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
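
/*
 * Usage sketch (the PTE layout and dirty-bit mask are hypothetical): a
 * softmmu page-table walker that sets accessed/dirty bits in guest PTEs
 * uses the _notdirty store above, so translated code sharing the page is
 * not needlessly invalidated.
 */
static void example_mark_pte_dirty(hwaddr pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x40);    /* 0x40: made-up dirty bit */
}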

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
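
/*
 * Usage sketch (the doorbell device is hypothetical): ringing a
 * little-endian 32-bit MMIO doorbell.  The helper byte-swaps as needed
 * for the device's declared endianness; if the store lands in RAM
 * instead, the page is marked dirty and translated code on it is
 * invalidated.
 */
static void example_ring_doorbell(hwaddr doorbell_addr, uint32_t tail)
{
    stl_le_phys(doorbell_addr, tail);
}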

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
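
/*
 * Sketch of the difference between the three stq variants, using a
 * hypothetical scratch address: stq_phys() stores in the *target*
 * byte order (tswap64), while the _le_/_be_ variants use a fixed one.
 */
static void example_store_u64_variants(hwaddr scratch)
{
    stq_phys(scratch, 0x1122334455667788ULL);         /* target order */
    stq_le_phys(scratch + 8, 0x1122334455667788ULL);  /* always LE */
    stq_be_phys(scratch + 16, 0x1122334455667788ULL); /* always BE */
}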

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
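
/*
 * Usage sketch (a hypothetical gdbstub-style helper): peek a 32-bit value
 * at a guest virtual address, going through the per-page debug walk above
 * rather than the softmmu fast path.
 */
static int example_debug_peek_u32(CPUArchState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;              /* no physical page mapped */
    }
    *out = ldl_p(buf);          /* interpret in target byte order */
    return 0;
}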
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
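
/*
 * Usage sketch (hypothetical helper): legacy virtio fields are
 * guest-native-endian, so a caller loads them with the accessor
 * matching the target byte order reported above.
 */
static inline uint16_t example_virtio_lduw(const void *p)
{
    return virtio_is_big_endian() ? lduw_be_p(p) : lduw_le_p(p);
}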

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
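
/*
 * Usage sketch (the caller is hypothetical): code that dumps guest memory
 * skips MMIO pages, since reading them can trigger device side effects.
 */
static bool example_page_is_dumpable(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}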

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
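
/*
 * Illustrative callback (hypothetical): total up guest RAM by visiting
 * every block on ram_list, e.g.
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_sum_block, &total);
 */
static void example_sum_block(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
}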
#endif