/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
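/* Worked example (editor's illustration, not from the original source):
 * assuming a typical TARGET_PAGE_BITS of 12, P_L2_LEVELS evaluates to
 * ((64 - 12 - 1) / 9) + 1 = 6, i.e. the physical map is a radix tree of up
 * to six levels of P_L2_SIZE (512) entries each, enough to cover the whole
 * 64-bit address space above the page offset. */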

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
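/* Illustrative example (not part of the original source): if an entry in
 * node A points at node B with skip = 1, and B's only populated entry in
 * turn has skip = 2, the code above rewrites A's entry to point directly at
 * B's child with skip = 1 + 2 = 3, so a later phys_page_find() crosses all
 * three levels in a single step. */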

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
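/* Walk sketch (editor's summary): with index = addr >> TARGET_PAGE_BITS and
 * every skip equal to 1, i takes the values P_L2_LEVELS - 1 down to 0 and
 * each iteration picks the next P_L2_BITS-wide slice of the page number,
 * most significant slice first.  A compacted entry with skip > 1 decrements
 * i by more than one, jumping over the levels phys_page_compact() folded
 * away.  The final range_covers_byte() test returns the unassigned section
 * for addresses that fall outside the region actually recorded at the
 * leaf. */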

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
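/* Usage sketch (illustrative, simplified from typical callers such as
 * address_space_rw() later in this file): translation is driven in a loop,
 * with each iteration clamped to the length the returned region can serve:
 *
 *     while (len > 0) {
 *         hwaddr l = len, addr1;
 *         MemoryRegion *mr = address_space_translate(as, addr, &addr1, &l,
 *                                                    is_write);
 *         // ... access l bytes of mr at offset addr1 ...
 *         len -= l;
 *         addr += l;
 *     }
 */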

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = 0;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->exception_index != 0;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};
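/* Note (editor's summary): the exception_index subsection above is only
 * transmitted when cpu_common_exception_index_needed() returns true, so
 * migration streams stay compatible with older QEMU versions whenever the
 * field still holds its default value. */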

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
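/* Example (illustrative): for a 4-byte watchpoint at a 4-byte-aligned
 * address, len_mask = ~(4 - 1) = ~3 and both checks above pass; len = 3
 * fails the power-of-two test (len & (len - 1)), and an unaligned address
 * fails the (addr & ~len_mask) test. */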

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
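/* Summary (editor's note): for RAM the returned iotlb value is the
 * page-aligned ram_addr_t plus the in-page offset, with PHYS_SECTION_NOTDIRTY
 * or PHYS_SECTION_ROM ORed into the low bits; for MMIO it is the section's
 * index in the dispatch map plus the offset; pages with a matching
 * watchpoint are redirected to PHYS_SECTION_WATCH so the access takes the
 * trap path. */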
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
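/* Example (illustrative): if a device region of 0x100 bytes is mapped at
 * physical address 0x10000040 and TARGET_PAGE_SIZE is 0x1000, the containing
 * page at 0x10000000 gets a subpage_t, bytes 0x40..0x13f of that page are
 * registered to this section, and the rest of the page keeps whatever
 * sections were registered before (or stays unassigned). */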

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
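/* Example (illustrative): a 0x3000-byte section starting at 0x10000800 with
 * 0x1000-byte pages is split by the loop above into a leading subpage piece
 * 0x10000800..0x10000fff, a page-aligned middle piece 0x10001000..0x10002fff
 * handled by register_multipage(), and a trailing subpage piece
 * 0x10003000..0x100037ff. */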

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
#endif
1144
Alex Williamsond17b5282010-06-25 11:08:38 -06001145static ram_addr_t find_ram_offset(ram_addr_t size)
1146{
Alex Williamson04b16652010-07-02 11:13:17 -06001147 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001148 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001149
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001150    assert(size != 0); /* it would hand out the same offset multiple times */
1151
Paolo Bonzinia3161032012-11-14 15:54:48 +01001152 if (QTAILQ_EMPTY(&ram_list.blocks))
Alex Williamson04b16652010-07-02 11:13:17 -06001153 return 0;
1154
Paolo Bonzinia3161032012-11-14 15:54:48 +01001155 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001156 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001157
1158 end = block->offset + block->length;
1159
Paolo Bonzinia3161032012-11-14 15:54:48 +01001160 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001161 if (next_block->offset >= end) {
1162 next = MIN(next, next_block->offset);
1163 }
1164 }
1165 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001166 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001167 mingap = next - end;
1168 }
1169 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001170
1171 if (offset == RAM_ADDR_MAX) {
1172 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1173 (uint64_t)size);
1174 abort();
1175 }
1176
Alex Williamson04b16652010-07-02 11:13:17 -06001177 return offset;
1178}
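
/*
 * Illustrative example (hypothetical values, not from the original code):
 * with existing blocks covering [0, 0x1000) and [0x3000, 0x4000), a request
 * for size 0x1000 sees two candidate gaps: [0x1000, 0x3000) of size 0x2000,
 * and the unbounded tail after 0x4000.  find_ram_offset() keeps the smallest
 * gap that still fits, so the new block is placed at offset 0x1000.
 */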
1179
Juan Quintela652d7ec2012-07-20 10:37:54 +02001180ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001181{
Alex Williamsond17b5282010-06-25 11:08:38 -06001182 RAMBlock *block;
1183 ram_addr_t last = 0;
1184
Paolo Bonzinia3161032012-11-14 15:54:48 +01001185 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001186 last = MAX(last, block->offset + block->length);
1187
1188 return last;
1189}
1190
Jason Baronddb97f12012-08-02 15:44:16 -04001191static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1192{
1193 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001194
 1195    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001196 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1197 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001198 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1199 if (ret) {
1200 perror("qemu_madvise");
1201 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1202 "but dump_guest_core=off specified\n");
1203 }
1204 }
1205}
1206
Hu Tao20cfe882014-04-02 15:13:26 +08001207static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001208{
Hu Tao20cfe882014-04-02 15:13:26 +08001209 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001210
Paolo Bonzinia3161032012-11-14 15:54:48 +01001211 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001212 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001213 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001214 }
1215 }
Hu Tao20cfe882014-04-02 15:13:26 +08001216
1217 return NULL;
1218}
1219
1220void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1221{
1222 RAMBlock *new_block = find_ram_block(addr);
1223 RAMBlock *block;
1224
Avi Kivityc5705a72011-12-20 15:59:12 +02001225 assert(new_block);
1226 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001227
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001228 if (dev) {
1229 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001230 if (id) {
1231 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001232 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001233 }
1234 }
1235 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1236
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001237 /* This assumes the iothread lock is taken here too. */
1238 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001239 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001240 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001241 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1242 new_block->idstr);
1243 abort();
1244 }
1245 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001246 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001247}
1248
Hu Tao20cfe882014-04-02 15:13:26 +08001249void qemu_ram_unset_idstr(ram_addr_t addr)
1250{
1251 RAMBlock *block = find_ram_block(addr);
1252
1253 if (block) {
1254 memset(block->idstr, 0, sizeof(block->idstr));
1255 }
1256}
1257
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001258static int memory_try_enable_merging(void *addr, size_t len)
1259{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001260 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001261 /* disabled by the user */
1262 return 0;
1263 }
1264
1265 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1266}
1267
Hu Taoef701d72014-09-09 13:27:54 +08001268static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001269{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001270 RAMBlock *block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001271 ram_addr_t old_ram_size, new_ram_size;
1272
1273 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001274
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001275 /* This assumes the iothread lock is taken here too. */
1276 qemu_mutex_lock_ramlist();
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001277 new_block->offset = find_ram_offset(new_block->length);
1278
1279 if (!new_block->host) {
1280 if (xen_enabled()) {
1281 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1282 } else {
1283 new_block->host = phys_mem_alloc(new_block->length);
Markus Armbruster39228252013-07-31 15:11:11 +02001284 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001285 error_setg_errno(errp, errno,
1286 "cannot set up guest memory '%s'",
1287 memory_region_name(new_block->mr));
1288 qemu_mutex_unlock_ramlist();
1289 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001290 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001291 memory_try_enable_merging(new_block->host, new_block->length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001292 }
1293 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001294
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001295 /* Keep the list sorted from biggest to smallest block. */
1296 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1297 if (block->length < new_block->length) {
1298 break;
1299 }
1300 }
1301 if (block) {
1302 QTAILQ_INSERT_BEFORE(block, new_block, next);
1303 } else {
1304 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1305 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001306 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001307
Umesh Deshpandef798b072011-08-18 11:41:17 -07001308 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001309 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001310
Juan Quintela2152f5c2013-10-08 13:52:02 +02001311 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1312
1313 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001314 int i;
1315 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1316 ram_list.dirty_memory[i] =
1317 bitmap_zero_extend(ram_list.dirty_memory[i],
1318 old_ram_size, new_ram_size);
1319 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001320 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001321 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001322
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001323 qemu_ram_setup_dump(new_block->host, new_block->length);
1324 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1325 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001326
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001327 if (kvm_enabled()) {
1328 kvm_setup_guest_memory(new_block->host, new_block->length);
1329 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001330
1331 return new_block->offset;
1332}
1333
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001334#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001335ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001336 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001337 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001338{
1339 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001340 ram_addr_t addr;
1341 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001342
1343 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001344 error_setg(errp, "-mem-path not supported with Xen");
1345 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001346 }
1347
1348 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1349 /*
1350 * file_ram_alloc() needs to allocate just like
1351 * phys_mem_alloc, but we haven't bothered to provide
1352 * a hook there.
1353 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001354 error_setg(errp,
1355 "-mem-path not supported with this accelerator");
1356 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001357 }
1358
1359 size = TARGET_PAGE_ALIGN(size);
1360 new_block = g_malloc0(sizeof(*new_block));
1361 new_block->mr = mr;
1362 new_block->length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001363 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001364 new_block->host = file_ram_alloc(new_block, size,
1365 mem_path, errp);
1366 if (!new_block->host) {
1367 g_free(new_block);
1368 return -1;
1369 }
1370
Hu Taoef701d72014-09-09 13:27:54 +08001371 addr = ram_block_add(new_block, &local_err);
1372 if (local_err) {
1373 g_free(new_block);
1374 error_propagate(errp, local_err);
1375 return -1;
1376 }
1377 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001378}
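
/*
 * Hypothetical usage sketch (not part of the original code): a memory
 * backend wanting hugetlbfs-backed, shared guest RAM might call the helper
 * above roughly as follows.  The size, the MemoryRegion "mr", the "errp"
 * pointer and the "/dev/hugepages" path are illustrative assumptions.
 *
 *     Error *local_err = NULL;
 *     ram_addr_t offset = qemu_ram_alloc_from_file(512 * 1024 * 1024, mr,
 *                                                  true, "/dev/hugepages",
 *                                                  &local_err);
 *     if (local_err) {
 *         error_propagate(errp, local_err);
 *         return;
 *     }
 */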
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001379#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001380
1381ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Hu Taoef701d72014-09-09 13:27:54 +08001382 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001383{
1384 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001385 ram_addr_t addr;
1386 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001387
1388 size = TARGET_PAGE_ALIGN(size);
1389 new_block = g_malloc0(sizeof(*new_block));
1390 new_block->mr = mr;
1391 new_block->length = size;
1392 new_block->fd = -1;
1393 new_block->host = host;
1394 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001395 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001396 }
Hu Taoef701d72014-09-09 13:27:54 +08001397 addr = ram_block_add(new_block, &local_err);
1398 if (local_err) {
1399 g_free(new_block);
1400 error_propagate(errp, local_err);
1401 return -1;
1402 }
1403 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001404}
1405
Hu Taoef701d72014-09-09 13:27:54 +08001406ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001407{
Hu Taoef701d72014-09-09 13:27:54 +08001408 return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001409}
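
/*
 * Hypothetical usage sketch (not part of the original code): a device model
 * that owns a plain RAM block, e.g. a framebuffer, might allocate it like
 * this.  The 16 MiB size, "mr" and "errp" are illustrative assumptions.
 *
 *     Error *local_err = NULL;
 *     ram_addr_t vram = qemu_ram_alloc(16 * 1024 * 1024, mr, &local_err);
 *     if (local_err) {
 *         error_propagate(errp, local_err);
 *         return;
 *     }
 */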
bellarde9a1ab12007-02-08 23:08:38 +00001410
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001411void qemu_ram_free_from_ptr(ram_addr_t addr)
1412{
1413 RAMBlock *block;
1414
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001415 /* This assumes the iothread lock is taken here too. */
1416 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001417 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001418 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001419 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001420 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001421 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001422 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001423 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001424 }
1425 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001426 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001427}
1428
Anthony Liguoric227f092009-10-01 16:12:16 -05001429void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001430{
Alex Williamson04b16652010-07-02 11:13:17 -06001431 RAMBlock *block;
1432
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001433 /* This assumes the iothread lock is taken here too. */
1434 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001435 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001436 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001437 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001438 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001439 ram_list.version++;
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001440 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001441 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001442 } else if (xen_enabled()) {
1443 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001444#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001445 } else if (block->fd >= 0) {
1446 munmap(block->host, block->length);
1447 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001448#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001449 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001450 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001451 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001452 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001453 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001454 }
1455 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001456 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001457
bellarde9a1ab12007-02-08 23:08:38 +00001458}
1459
Huang Yingcd19cfa2011-03-02 08:56:19 +01001460#ifndef _WIN32
1461void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1462{
1463 RAMBlock *block;
1464 ram_addr_t offset;
1465 int flags;
1466 void *area, *vaddr;
1467
Paolo Bonzinia3161032012-11-14 15:54:48 +01001468 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001469 offset = addr - block->offset;
1470 if (offset < block->length) {
1471 vaddr = block->host + offset;
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001472 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001473 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001474 } else if (xen_enabled()) {
1475 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001476 } else {
1477 flags = MAP_FIXED;
1478 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001479 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001480 flags |= (block->flags & RAM_SHARED ?
1481 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001482 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1483 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001484 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001485 /*
1486 * Remap needs to match alloc. Accelerators that
1487 * set phys_mem_alloc never remap. If they did,
1488 * we'd need a remap hook here.
1489 */
1490 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1491
Huang Yingcd19cfa2011-03-02 08:56:19 +01001492 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1493 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1494 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001495 }
1496 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001497 fprintf(stderr, "Could not remap addr: "
1498 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001499 length, addr);
1500 exit(1);
1501 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001502 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001503 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001504 }
1505 return;
1506 }
1507 }
1508}
1509#endif /* !_WIN32 */
1510
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001511int qemu_get_ram_fd(ram_addr_t addr)
1512{
1513 RAMBlock *block = qemu_get_ram_block(addr);
1514
1515 return block->fd;
1516}
1517
Damjan Marion3fd74b82014-06-26 23:01:32 +02001518void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1519{
1520 RAMBlock *block = qemu_get_ram_block(addr);
1521
1522 return block->host;
1523}
1524
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001525/* Return a host pointer to ram allocated with qemu_ram_alloc.
1526 With the exception of the softmmu code in this file, this should
1527 only be used for local memory (e.g. video ram) that the device owns,
 1528   and that it knows it isn't going to access beyond the end of the block.
1529
1530 It should not be used for general purpose DMA.
1531 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1532 */
1533void *qemu_get_ram_ptr(ram_addr_t addr)
1534{
1535 RAMBlock *block = qemu_get_ram_block(addr);
1536
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001537 if (xen_enabled()) {
1538 /* We need to check if the requested address is in the RAM
1539 * because we don't want to map the entire memory in QEMU.
1540 * In that case just map until the end of the page.
1541 */
1542 if (block->offset == 0) {
1543 return xen_map_cache(addr, 0, 0);
1544 } else if (block->host == NULL) {
1545 block->host =
1546 xen_map_cache(block->offset, block->length, 1);
1547 }
1548 }
1549 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001550}
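
/*
 * Hypothetical usage sketch (not part of the original code): a display
 * device that owns its own video RAM block could take a host pointer into
 * it and fill it directly, as the comment above allows.  "vram" is assumed
 * to be the ram_addr_t returned by an earlier qemu_ram_alloc() call.
 *
 *     uint8_t *p = qemu_get_ram_ptr(vram);
 *     memset(p, 0, 16 * 1024 * 1024);
 */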
1551
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001552/* Return a host pointer to the guest's RAM. Similar to qemu_get_ram_ptr
1553 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001554static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001555{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001556 if (*size == 0) {
1557 return NULL;
1558 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001559 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001560 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001561 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001562 RAMBlock *block;
1563
Paolo Bonzinia3161032012-11-14 15:54:48 +01001564 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001565 if (addr - block->offset < block->length) {
1566 if (addr - block->offset + *size > block->length)
1567 *size = block->length - addr + block->offset;
1568 return block->host + (addr - block->offset);
1569 }
1570 }
1571
1572 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1573 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001574 }
1575}
1576
Paolo Bonzini7443b432013-06-03 12:44:02 +02001577/* Some of the softmmu routines need to translate from a host pointer
1578 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001579MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001580{
pbrook94a6b542009-04-11 17:15:54 +00001581 RAMBlock *block;
1582 uint8_t *host = ptr;
1583
Jan Kiszka868bb332011-06-21 22:59:09 +02001584 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001585 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001586 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001587 }
1588
Paolo Bonzini23887b72013-05-06 14:28:39 +02001589 block = ram_list.mru_block;
1590 if (block && block->host && host - block->host < block->length) {
1591 goto found;
1592 }
1593
Paolo Bonzinia3161032012-11-14 15:54:48 +01001594 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001595        /* This case happens when the block is not mapped. */
1596 if (block->host == NULL) {
1597 continue;
1598 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001599 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001600 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001601 }
pbrook94a6b542009-04-11 17:15:54 +00001602 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001603
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001604 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001605
1606found:
1607 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001608 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001609}
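
/*
 * Hypothetical usage sketch (not part of the original code): given a host
 * pointer previously handed out by qemu_get_ram_ptr(), the owning
 * MemoryRegion and the guest RAM offset can be recovered.  "host_ptr" is an
 * assumed, caller-provided pointer.
 *
 *     ram_addr_t ram_addr;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(host_ptr, &ram_addr);
 *     if (mr == NULL) {
 *         ... host_ptr does not point into guest RAM, handle the error ...
 *     }
 */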
Alex Williamsonf471a172010-06-11 11:11:42 -06001610
Avi Kivitya8170e52012-10-23 12:30:10 +02001611static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001612 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001613{
Juan Quintela52159192013-10-08 12:44:04 +02001614 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001615 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001616 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001617 switch (size) {
1618 case 1:
1619 stb_p(qemu_get_ram_ptr(ram_addr), val);
1620 break;
1621 case 2:
1622 stw_p(qemu_get_ram_ptr(ram_addr), val);
1623 break;
1624 case 4:
1625 stl_p(qemu_get_ram_ptr(ram_addr), val);
1626 break;
1627 default:
1628 abort();
1629 }
Paolo Bonzini68868672014-07-21 16:45:18 +02001630 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
bellardf23db162005-08-21 19:12:28 +00001631 /* we remove the notdirty callback only if the code has been
1632 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001633 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001634 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001635 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001636 }
bellard1ccde1c2004-02-06 19:46:14 +00001637}
1638
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001639static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1640 unsigned size, bool is_write)
1641{
1642 return is_write;
1643}
1644
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001645static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001646 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001647 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001648 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001649};
1650
pbrook0f459d12008-06-09 00:20:13 +00001651/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001652static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001653{
Andreas Färber93afead2013-08-26 03:41:01 +02001654 CPUState *cpu = current_cpu;
1655 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001656 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001657 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001658 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001659 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001660
Andreas Färberff4700b2013-08-26 18:23:18 +02001661 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001662 /* We re-entered the check after replacing the TB. Now raise
 1663         * the debug interrupt so that it will trigger after the
1664 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001665 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001666 return;
1667 }
Andreas Färber93afead2013-08-26 03:41:01 +02001668 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001669 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001670 if ((vaddr == (wp->vaddr & len_mask) ||
1671 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001672 wp->flags |= BP_WATCHPOINT_HIT;
Andreas Färberff4700b2013-08-26 18:23:18 +02001673 if (!cpu->watchpoint_hit) {
1674 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001675 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001676 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001677 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001678 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001679 } else {
1680 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001681 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001682 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001683 }
aliguori06d55cc2008-11-18 20:24:06 +00001684 }
aliguori6e140f22008-11-18 20:37:55 +00001685 } else {
1686 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001687 }
1688 }
1689}
1690
pbrook6658ffb2007-03-16 23:58:11 +00001691/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1692 so these check for a hit then pass through to the normal out-of-line
1693 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001694static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001695 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001696{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001697 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1698 switch (size) {
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10001699 case 1: return ldub_phys(&address_space_memory, addr);
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10001700 case 2: return lduw_phys(&address_space_memory, addr);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01001701 case 4: return ldl_phys(&address_space_memory, addr);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001702 default: abort();
1703 }
pbrook6658ffb2007-03-16 23:58:11 +00001704}
1705
Avi Kivitya8170e52012-10-23 12:30:10 +02001706static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001707 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001708{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001709 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1710 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001711 case 1:
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10001712 stb_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001713 break;
1714 case 2:
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10001715 stw_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001716 break;
1717 case 4:
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10001718 stl_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001719 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001720 default: abort();
1721 }
pbrook6658ffb2007-03-16 23:58:11 +00001722}
1723
Avi Kivity1ec9b902012-01-02 12:47:48 +02001724static const MemoryRegionOps watch_mem_ops = {
1725 .read = watch_mem_read,
1726 .write = watch_mem_write,
1727 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001728};
pbrook6658ffb2007-03-16 23:58:11 +00001729
Avi Kivitya8170e52012-10-23 12:30:10 +02001730static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001731 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001732{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001733 subpage_t *subpage = opaque;
1734 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001735
blueswir1db7b5422007-05-26 17:36:03 +00001736#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001737 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001738 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001739#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001740 address_space_read(subpage->as, addr + subpage->base, buf, len);
1741 switch (len) {
1742 case 1:
1743 return ldub_p(buf);
1744 case 2:
1745 return lduw_p(buf);
1746 case 4:
1747 return ldl_p(buf);
1748 default:
1749 abort();
1750 }
blueswir1db7b5422007-05-26 17:36:03 +00001751}
1752
Avi Kivitya8170e52012-10-23 12:30:10 +02001753static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001754 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001755{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001756 subpage_t *subpage = opaque;
1757 uint8_t buf[4];
1758
blueswir1db7b5422007-05-26 17:36:03 +00001759#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001760 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001761 " value %"PRIx64"\n",
1762 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001763#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001764 switch (len) {
1765 case 1:
1766 stb_p(buf, value);
1767 break;
1768 case 2:
1769 stw_p(buf, value);
1770 break;
1771 case 4:
1772 stl_p(buf, value);
1773 break;
1774 default:
1775 abort();
1776 }
1777 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001778}
1779
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001780static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001781 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001782{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001783 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001784#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001785 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001786 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001787#endif
1788
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001789 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001790 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001791}
1792
Avi Kivity70c68e42012-01-02 12:32:48 +02001793static const MemoryRegionOps subpage_ops = {
1794 .read = subpage_read,
1795 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001796 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001797 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001798};
1799
Anthony Liguoric227f092009-10-01 16:12:16 -05001800static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001801 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001802{
1803 int idx, eidx;
1804
1805 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1806 return -1;
1807 idx = SUBPAGE_IDX(start);
1808 eidx = SUBPAGE_IDX(end);
1809#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001810 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1811 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001812#endif
blueswir1db7b5422007-05-26 17:36:03 +00001813 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001814 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001815 }
1816
1817 return 0;
1818}
1819
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001820static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001821{
Anthony Liguoric227f092009-10-01 16:12:16 -05001822 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001823
Anthony Liguori7267c092011-08-20 22:09:37 -05001824 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001825
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001826 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001827 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001828 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001829 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001830 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001831#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001832 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1833 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001834#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001835 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001836
1837 return mmio;
1838}
1839
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001840static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1841 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001842{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001843 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02001844 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001845 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02001846 .mr = mr,
1847 .offset_within_address_space = 0,
1848 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001849 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001850 };
1851
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001852 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001853}
1854
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001855MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001856{
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001857 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001858}
1859
Avi Kivitye9179ce2009-06-14 11:38:52 +03001860static void io_mem_init(void)
1861{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02001862 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001863 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02001864 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001865 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02001866 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001867 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02001868 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001869}
1870
Avi Kivityac1970f2012-10-03 16:22:53 +02001871static void mem_begin(MemoryListener *listener)
1872{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001873 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001874 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1875 uint16_t n;
1876
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001877 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001878 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001879 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001880 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001881 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001882 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001883 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001884 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001885
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001886 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001887 d->as = as;
1888 as->next_dispatch = d;
1889}
1890
1891static void mem_commit(MemoryListener *listener)
1892{
1893 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001894 AddressSpaceDispatch *cur = as->dispatch;
1895 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001896
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001897 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001898
Paolo Bonzini0475d942013-05-29 12:28:21 +02001899 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001900
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001901 if (cur) {
1902 phys_sections_free(&cur->map);
1903 g_free(cur);
1904 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001905}
1906
Avi Kivity1d711482012-10-02 18:54:45 +02001907static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001908{
Andreas Färber182735e2013-05-29 22:29:20 +02001909 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001910
1911 /* since each CPU stores ram addresses in its TLB cache, we must
1912 reset the modified entries */
1913 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001914 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01001915        /* FIXME: Disentangle the cpu.h circular file deps so we can
 1916           directly get the right CPU from the listener. */
1917 if (cpu->tcg_as_listener != listener) {
1918 continue;
1919 }
Andreas Färber00c8cb02013-09-04 02:19:44 +02001920 tlb_flush(cpu, 1);
Avi Kivity117712c2012-02-12 21:23:17 +02001921 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001922}
1923
Avi Kivity93632742012-02-08 16:54:16 +02001924static void core_log_global_start(MemoryListener *listener)
1925{
Juan Quintela981fdf22013-10-10 11:54:09 +02001926 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001927}
1928
1929static void core_log_global_stop(MemoryListener *listener)
1930{
Juan Quintela981fdf22013-10-10 11:54:09 +02001931 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001932}
1933
Avi Kivity93632742012-02-08 16:54:16 +02001934static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001935 .log_global_start = core_log_global_start,
1936 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001937 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001938};
1939
Avi Kivityac1970f2012-10-03 16:22:53 +02001940void address_space_init_dispatch(AddressSpace *as)
1941{
Paolo Bonzini00752702013-05-29 12:13:54 +02001942 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001943 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001944 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001945 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001946 .region_add = mem_add,
1947 .region_nop = mem_add,
1948 .priority = 0,
1949 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001950 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001951}
1952
Avi Kivity83f3c252012-10-07 12:59:55 +02001953void address_space_destroy_dispatch(AddressSpace *as)
1954{
1955 AddressSpaceDispatch *d = as->dispatch;
1956
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001957 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001958 g_free(d);
1959 as->dispatch = NULL;
1960}
1961
Avi Kivity62152b82011-07-26 14:26:14 +03001962static void memory_map_init(void)
1963{
Anthony Liguori7267c092011-08-20 22:09:37 -05001964 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001965
Paolo Bonzini57271d62013-11-07 17:14:37 +01001966 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001967 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001968
Anthony Liguori7267c092011-08-20 22:09:37 -05001969 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001970 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1971 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001972 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001973
Avi Kivityf6790af2012-10-02 20:13:51 +02001974 memory_listener_register(&core_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03001975}
1976
1977MemoryRegion *get_system_memory(void)
1978{
1979 return system_memory;
1980}
1981
Avi Kivity309cb472011-08-08 16:09:03 +03001982MemoryRegion *get_system_io(void)
1983{
1984 return system_io;
1985}
1986
pbrooke2eef172008-06-08 01:09:01 +00001987#endif /* !defined(CONFIG_USER_ONLY) */
1988
bellard13eb76e2004-01-24 15:23:36 +00001989/* physical memory access (slow version, mainly for debug) */
1990#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001991int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001992 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001993{
1994 int l, flags;
1995 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001996 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001997
1998 while (len > 0) {
1999 page = addr & TARGET_PAGE_MASK;
2000 l = (page + TARGET_PAGE_SIZE) - addr;
2001 if (l > len)
2002 l = len;
2003 flags = page_get_flags(page);
2004 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002005 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002006 if (is_write) {
2007 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002008 return -1;
bellard579a97f2007-11-11 14:26:47 +00002009 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002010 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002011 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002012 memcpy(p, buf, l);
2013 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002014 } else {
2015 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002016 return -1;
bellard579a97f2007-11-11 14:26:47 +00002017 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002018 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002019 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002020 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002021 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002022 }
2023 len -= l;
2024 buf += l;
2025 addr += l;
2026 }
Paul Brooka68fe892010-03-01 00:08:59 +00002027 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002028}
bellard8df1cd02005-01-28 22:37:22 +00002029
bellard13eb76e2004-01-24 15:23:36 +00002030#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002031
Avi Kivitya8170e52012-10-23 12:30:10 +02002032static void invalidate_and_set_dirty(hwaddr addr,
2033 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002034{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002035 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002036 /* invalidate code */
2037 tb_invalidate_phys_page_range(addr, addr + length, 0);
2038 /* set dirty bit */
Paolo Bonzini68868672014-07-21 16:45:18 +02002039 cpu_physical_memory_set_dirty_range_nocode(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002040 }
Anthony PERARDe2269392012-10-03 13:49:22 +00002041 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002042}
2043
Richard Henderson23326162013-07-08 14:55:59 -07002044static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002045{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002046 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002047
2048 /* Regions are assumed to support 1-4 byte accesses unless
2049 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002050 if (access_size_max == 0) {
2051 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002052 }
Richard Henderson23326162013-07-08 14:55:59 -07002053
2054 /* Bound the maximum access by the alignment of the address. */
2055 if (!mr->ops->impl.unaligned) {
2056 unsigned align_size_max = addr & -addr;
2057 if (align_size_max != 0 && align_size_max < access_size_max) {
2058 access_size_max = align_size_max;
2059 }
2060 }
2061
2062 /* Don't attempt accesses larger than the maximum. */
2063 if (l > access_size_max) {
2064 l = access_size_max;
2065 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02002066 if (l & (l - 1)) {
2067 l = 1 << (qemu_fls(l) - 1);
2068 }
Richard Henderson23326162013-07-08 14:55:59 -07002069
2070 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002071}
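
/*
 * Illustrative example (hypothetical values, not from the original code):
 * for a region that declares no valid.max_access_size (so the default of 4
 * applies) and no unaligned support, an access of l = 4 at addr = 0x1006
 * has align_size_max = addr & -addr = 2, so it is clamped to 2 bytes; a
 * leftover length of 3 would likewise be reduced to 2, the largest power of
 * two below it, by the l & (l - 1) check above.
 */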
2072
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002073bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002074 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002075{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002076 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002077 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002078 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002079 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002080 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002081 bool error = false;
ths3b46e622007-09-17 08:09:54 +00002082
bellard13eb76e2004-01-24 15:23:36 +00002083 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002084 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002085 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002086
bellard13eb76e2004-01-24 15:23:36 +00002087 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002088 if (!memory_access_is_direct(mr, is_write)) {
2089 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002090 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002091 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002092 switch (l) {
2093 case 8:
2094 /* 64 bit write access */
2095 val = ldq_p(buf);
2096 error |= io_mem_write(mr, addr1, val, 8);
2097 break;
2098 case 4:
bellard1c213d12005-09-03 10:49:04 +00002099 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002100 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002101 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002102 break;
2103 case 2:
bellard1c213d12005-09-03 10:49:04 +00002104 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002105 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002106 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002107 break;
2108 case 1:
bellard1c213d12005-09-03 10:49:04 +00002109 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002110 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002111 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002112 break;
2113 default:
2114 abort();
bellard13eb76e2004-01-24 15:23:36 +00002115 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002116 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002117 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002118 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002119 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002120 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002121 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002122 }
2123 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002124 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002125 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002126 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002127 switch (l) {
2128 case 8:
2129 /* 64 bit read access */
2130 error |= io_mem_read(mr, addr1, &val, 8);
2131 stq_p(buf, val);
2132 break;
2133 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002134 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002135 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002136 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002137 break;
2138 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002139 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002140 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002141 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002142 break;
2143 case 1:
bellard1c213d12005-09-03 10:49:04 +00002144 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002145 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002146 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002147 break;
2148 default:
2149 abort();
bellard13eb76e2004-01-24 15:23:36 +00002150 }
2151 } else {
2152 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002153 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002154 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002155 }
2156 }
2157 len -= l;
2158 buf += l;
2159 addr += l;
2160 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002161
2162 return error;
bellard13eb76e2004-01-24 15:23:36 +00002163}
bellard8df1cd02005-01-28 22:37:22 +00002164
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002165bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002166 const uint8_t *buf, int len)
2167{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002168 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002169}
2170
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002171bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002172{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002173 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002174}
2175
2176
Avi Kivitya8170e52012-10-23 12:30:10 +02002177void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002178 int len, int is_write)
2179{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002180 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002181}
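
/*
 * Hypothetical usage sketch (not part of the original code): a device model
 * doing a small DMA write into guest memory can use the convenience wrapper
 * directly.  The guest physical address 0x1000 is an assumed example value.
 *
 *     uint8_t data[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     cpu_physical_memory_rw(0x1000, data, sizeof(data), 1);
 */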
2182
Alexander Graf582b55a2013-12-11 14:17:44 +01002183enum write_rom_type {
2184 WRITE_DATA,
2185 FLUSH_CACHE,
2186};
2187
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002188static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002189 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002190{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002191 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002192 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002193 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002194 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002195
bellardd0ecd2a2006-04-23 17:14:48 +00002196 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002197 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002198 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002199
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002200 if (!(memory_region_is_ram(mr) ||
2201 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002202 /* do nothing */
2203 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002204 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002205 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002206 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002207 switch (type) {
2208 case WRITE_DATA:
2209 memcpy(ptr, buf, l);
2210 invalidate_and_set_dirty(addr1, l);
2211 break;
2212 case FLUSH_CACHE:
2213 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2214 break;
2215 }
bellardd0ecd2a2006-04-23 17:14:48 +00002216 }
2217 len -= l;
2218 buf += l;
2219 addr += l;
2220 }
2221}
2222
Alexander Graf582b55a2013-12-11 14:17:44 +01002223/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002224void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002225 const uint8_t *buf, int len)
2226{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002227 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002228}
2229
2230void cpu_flush_icache_range(hwaddr start, int len)
2231{
2232 /*
2233 * This function should do the same thing as an icache flush that was
2234 * triggered from within the guest. For TCG we are always cache coherent,
2235 * so there is no need to flush anything. For KVM / Xen we need to flush
2236 * the host's instruction cache at least.
2237 */
2238 if (tcg_enabled()) {
2239 return;
2240 }
2241
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002242 cpu_physical_memory_write_rom_internal(&address_space_memory,
2243 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002244}
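
/*
 * Hypothetical usage sketch (not part of the original code): a firmware
 * loader that copies executable code into guest memory and then makes the
 * host instruction cache coherent could combine the two helpers above.
 * "blob", "blob_len" and the load address are illustrative assumptions.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0x100000,
 *                                   blob, blob_len);
 *     cpu_flush_icache_range(0x100000, blob_len);
 */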
2245
aliguori6d16c2f2009-01-22 16:59:11 +00002246typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002247 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002248 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002249 hwaddr addr;
2250 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002251} BounceBuffer;
2252
2253static BounceBuffer bounce;
2254
aliguoriba223c22009-01-22 16:59:16 +00002255typedef struct MapClient {
2256 void *opaque;
2257 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002258 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002259} MapClient;
2260
Blue Swirl72cf2d42009-09-12 07:36:22 +00002261static QLIST_HEAD(map_client_list, MapClient) map_client_list
2262 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002263
2264void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2265{
Anthony Liguori7267c092011-08-20 22:09:37 -05002266 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002267
2268 client->opaque = opaque;
2269 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002270 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002271 return client;
2272}
2273
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002274static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002275{
2276 MapClient *client = (MapClient *)_client;
2277
Blue Swirl72cf2d42009-09-12 07:36:22 +00002278 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002279 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002280}
2281
2282static void cpu_notify_map_clients(void)
2283{
2284 MapClient *client;
2285
Blue Swirl72cf2d42009-09-12 07:36:22 +00002286 while (!QLIST_EMPTY(&map_client_list)) {
2287 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002288 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002289 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002290 }
2291}
2292
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002293bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2294{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002295 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002296 hwaddr l, xlat;
2297
2298 while (len > 0) {
2299 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002300 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2301 if (!memory_access_is_direct(mr, is_write)) {
2302 l = memory_access_size(mr, l, addr);
2303 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002304 return false;
2305 }
2306 }
2307
2308 len -= l;
2309 addr += l;
2310 }
2311 return true;
2312}
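
/*
 * Illustrative sketch (editor's addition): a device model can probe a
 * guest-physical window with address_space_access_valid() before
 * committing to a transfer, instead of finding out half-way through that
 * part of the range is unassigned. The descriptor address is hypothetical.
 *
 *     static void read_desc_ring(void)
 *     {
 *         uint8_t scratch[512];
 *         hwaddr desc_addr = 0x40000000;   // hypothetical ring base
 *
 *         if (address_space_access_valid(&address_space_memory, desc_addr,
 *                                        sizeof(scratch), false)) {
 *             address_space_rw(&address_space_memory, desc_addr,
 *                              scratch, sizeof(scratch), false);
 *         }
 *     }
 */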
2313
aliguori6d16c2f2009-01-22 16:59:11 +00002314/* Map a physical memory region into a host virtual address.
2315 * May map a subset of the requested range, given by and returned in *plen.
2316 * May return NULL if resources needed to perform the mapping are exhausted.
2317 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002318 * Use cpu_register_map_client() to know when retrying the map operation is
2319 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002320 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002321void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002322 hwaddr addr,
2323 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002324 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002325{
Avi Kivitya8170e52012-10-23 12:30:10 +02002326 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002327 hwaddr done = 0;
2328 hwaddr l, xlat, base;
2329 MemoryRegion *mr, *this_mr;
2330 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002331
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002332 if (len == 0) {
2333 return NULL;
2334 }
aliguori6d16c2f2009-01-22 16:59:11 +00002335
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002336 l = len;
2337 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2338 if (!memory_access_is_direct(mr, is_write)) {
2339 if (bounce.buffer) {
2340 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002341 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002342 /* Avoid unbounded allocations */
2343 l = MIN(l, TARGET_PAGE_SIZE);
2344 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002345 bounce.addr = addr;
2346 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002347
2348 memory_region_ref(mr);
2349 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002350 if (!is_write) {
2351 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002352 }
aliguori6d16c2f2009-01-22 16:59:11 +00002353
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002354 *plen = l;
2355 return bounce.buffer;
2356 }
2357
2358 base = xlat;
2359 raddr = memory_region_get_ram_addr(mr);
2360
2361 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002362 len -= l;
2363 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002364 done += l;
2365 if (len == 0) {
2366 break;
2367 }
2368
2369 l = len;
2370 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2371 if (this_mr != mr || xlat != base + done) {
2372 break;
2373 }
aliguori6d16c2f2009-01-22 16:59:11 +00002374 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002375
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002376 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002377 *plen = done;
2378 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002379}
2380
Avi Kivityac1970f2012-10-03 16:22:53 +02002381/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002382 * Will also mark the memory as dirty if is_write == 1. access_len gives
2383 * the amount of memory that was actually read or written by the caller.
2384 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002385void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2386 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002387{
2388 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002389 MemoryRegion *mr;
2390 ram_addr_t addr1;
2391
2392 mr = qemu_ram_addr_from_host(buffer, &addr1);
2393 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002394 if (is_write) {
Paolo Bonzini68868672014-07-21 16:45:18 +02002395 invalidate_and_set_dirty(addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002396 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002397 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002398 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002399 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002400 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002401 return;
2402 }
2403 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002404 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002405 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002406 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002407 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002408 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002409 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002410}
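
/*
 * Illustrative sketch (editor's addition): the usual zero-copy pattern
 * built on address_space_map()/address_space_unmap(). A device filling a
 * guest buffer maps it for writing, falls back to the map-client
 * notification when the single bounce buffer is busy, and reports the
 * bytes actually written when unmapping. dma_retry_cb() and
 * fill_from_device() are hypothetical helpers.
 *
 *     static void dma_retry_cb(void *opaque);     // hypothetical callback
 *
 *     static void dma_fill_guest_buffer(AddressSpace *as, hwaddr addr,
 *                                       hwaddr size)
 *     {
 *         hwaddr plen = size;
 *         void *host = address_space_map(as, addr, &plen, true);
 *
 *         if (!host) {
 *             // bounce buffer in use: retry once a mapping is released
 *             cpu_register_map_client(NULL, dma_retry_cb);
 *             return;
 *         }
 *         fill_from_device(host, plen);           // hypothetical helper
 *         address_space_unmap(as, host, plen, true, plen);
 *     }
 */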
bellardd0ecd2a2006-04-23 17:14:48 +00002411
Avi Kivitya8170e52012-10-23 12:30:10 +02002412void *cpu_physical_memory_map(hwaddr addr,
2413 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002414 int is_write)
2415{
2416 return address_space_map(&address_space_memory, addr, plen, is_write);
2417}
2418
Avi Kivitya8170e52012-10-23 12:30:10 +02002419void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2420 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002421{
2422 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2423}
2424
bellard8df1cd02005-01-28 22:37:22 +00002425/* warning: addr must be aligned */
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002426static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002427 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002428{
bellard8df1cd02005-01-28 22:37:22 +00002429 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002430 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002431 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002432 hwaddr l = 4;
2433 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002434
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002435 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002436 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002437 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002438 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002439#if defined(TARGET_WORDS_BIGENDIAN)
2440 if (endian == DEVICE_LITTLE_ENDIAN) {
2441 val = bswap32(val);
2442 }
2443#else
2444 if (endian == DEVICE_BIG_ENDIAN) {
2445 val = bswap32(val);
2446 }
2447#endif
bellard8df1cd02005-01-28 22:37:22 +00002448 } else {
2449 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002450 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002451 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002452 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002453 switch (endian) {
2454 case DEVICE_LITTLE_ENDIAN:
2455 val = ldl_le_p(ptr);
2456 break;
2457 case DEVICE_BIG_ENDIAN:
2458 val = ldl_be_p(ptr);
2459 break;
2460 default:
2461 val = ldl_p(ptr);
2462 break;
2463 }
bellard8df1cd02005-01-28 22:37:22 +00002464 }
2465 return val;
2466}
2467
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002468uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002469{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002470 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002471}
2472
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002473uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002474{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002475 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002476}
2477
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002478uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002479{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002480 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002481}
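
/*
 * Illustrative sketch (editor's addition): callers pick the accessor that
 * matches the guest-visible layout of the data they parse; the helpers
 * above hide any host/target byte swapping. The table address is
 * hypothetical.
 *
 *     hwaddr table = 0x1000;                      // hypothetical
 *     uint32_t native = ldl_phys(&address_space_memory, table);
 *     uint32_t le     = ldl_le_phys(&address_space_memory, table + 4);
 *     uint32_t be     = ldl_be_phys(&address_space_memory, table + 8);
 */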
2482
bellard84b7b8e2005-11-28 21:19:04 +00002483/* warning: addr must be aligned */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002484static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002485 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002486{
bellard84b7b8e2005-11-28 21:19:04 +00002487 uint8_t *ptr;
2488 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002489 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002490 hwaddr l = 8;
2491 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002492
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002493 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002494 false);
2495 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002496 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002497 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002498#if defined(TARGET_WORDS_BIGENDIAN)
2499 if (endian == DEVICE_LITTLE_ENDIAN) {
2500 val = bswap64(val);
2501 }
2502#else
2503 if (endian == DEVICE_BIG_ENDIAN) {
2504 val = bswap64(val);
2505 }
2506#endif
bellard84b7b8e2005-11-28 21:19:04 +00002507 } else {
2508 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002509 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002510 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002511 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002512 switch (endian) {
2513 case DEVICE_LITTLE_ENDIAN:
2514 val = ldq_le_p(ptr);
2515 break;
2516 case DEVICE_BIG_ENDIAN:
2517 val = ldq_be_p(ptr);
2518 break;
2519 default:
2520 val = ldq_p(ptr);
2521 break;
2522 }
bellard84b7b8e2005-11-28 21:19:04 +00002523 }
2524 return val;
2525}
2526
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002527uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002528{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002529 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002530}
2531
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002532uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002533{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002534 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002535}
2536
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002537uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002538{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002539 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002540}
2541
bellardaab33092005-10-30 20:48:42 +00002542/* XXX: optimize */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002543uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002544{
2545 uint8_t val;
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002546 address_space_rw(as, addr, &val, 1, 0);
bellardaab33092005-10-30 20:48:42 +00002547 return val;
2548}
2549
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002550/* warning: addr must be aligned */
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002551static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002552 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002553{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002554 uint8_t *ptr;
2555 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002556 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002557 hwaddr l = 2;
2558 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002559
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002560 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002561 false);
2562 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002563 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002564 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002565#if defined(TARGET_WORDS_BIGENDIAN)
2566 if (endian == DEVICE_LITTLE_ENDIAN) {
2567 val = bswap16(val);
2568 }
2569#else
2570 if (endian == DEVICE_BIG_ENDIAN) {
2571 val = bswap16(val);
2572 }
2573#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002574 } else {
2575 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002576 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002577 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002578 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002579 switch (endian) {
2580 case DEVICE_LITTLE_ENDIAN:
2581 val = lduw_le_p(ptr);
2582 break;
2583 case DEVICE_BIG_ENDIAN:
2584 val = lduw_be_p(ptr);
2585 break;
2586 default:
2587 val = lduw_p(ptr);
2588 break;
2589 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002590 }
2591 return val;
bellardaab33092005-10-30 20:48:42 +00002592}
2593
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002594uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002595{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002596 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002597}
2598
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002599uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002600{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002601 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002602}
2603
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002604uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002605{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002606 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002607}
2608
bellard8df1cd02005-01-28 22:37:22 +00002609/* warning: addr must be aligned. The RAM page is not marked as dirty
2610   and the code inside is not invalidated. This is useful when the dirty
2611   bits are used to track modified PTEs */
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002612void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002613{
bellard8df1cd02005-01-28 22:37:22 +00002614 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002615 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002616 hwaddr l = 4;
2617 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002618
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002619 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002620 true);
2621 if (l < 4 || !memory_access_is_direct(mr, true)) {
2622 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002623 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002624 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002625 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002626 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002627
2628 if (unlikely(in_migration)) {
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002629 if (cpu_physical_memory_is_clean(addr1)) {
aliguori74576192008-10-06 14:02:03 +00002630 /* invalidate code */
2631 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2632 /* set dirty bit */
Paolo Bonzini68868672014-07-21 16:45:18 +02002633 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
aliguori74576192008-10-06 14:02:03 +00002634 }
2635 }
bellard8df1cd02005-01-28 22:37:22 +00002636 }
2637}
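
/*
 * Illustrative sketch (editor's addition): target MMU emulation uses the
 * _notdirty variant when it sets accessed/dirty flags inside a guest page
 * table entry, so that the update itself does not flag the page holding
 * the PTE as dirty. The PTE address and flag value are hypothetical; real
 * targets use the CPU's own address space.
 *
 *     hwaddr pte_addr = 0x200000;                 // hypothetical PTE
 *     uint32_t pte = ldl_phys(&address_space_memory, pte_addr);
 *     stl_phys_notdirty(&address_space_memory, pte_addr, pte | 0x20);
 */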
2638
2639/* warning: addr must be aligned */
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002640static inline void stl_phys_internal(AddressSpace *as,
2641 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002642 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002643{
bellard8df1cd02005-01-28 22:37:22 +00002644 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002645 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002646 hwaddr l = 4;
2647 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002648
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002649 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002650 true);
2651 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002652#if defined(TARGET_WORDS_BIGENDIAN)
2653 if (endian == DEVICE_LITTLE_ENDIAN) {
2654 val = bswap32(val);
2655 }
2656#else
2657 if (endian == DEVICE_BIG_ENDIAN) {
2658 val = bswap32(val);
2659 }
2660#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002661 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002662 } else {
bellard8df1cd02005-01-28 22:37:22 +00002663 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002664 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002665 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002666 switch (endian) {
2667 case DEVICE_LITTLE_ENDIAN:
2668 stl_le_p(ptr, val);
2669 break;
2670 case DEVICE_BIG_ENDIAN:
2671 stl_be_p(ptr, val);
2672 break;
2673 default:
2674 stl_p(ptr, val);
2675 break;
2676 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002677 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002678 }
2679}
2680
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002681void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002682{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002683 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002684}
2685
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002686void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002687{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002688 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002689}
2690
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002691void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002692{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002693 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002694}
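
/*
 * Illustrative sketch (editor's addition): the store side mirrors the
 * loads above, e.g. a little-endian device model posting a status word
 * back into guest memory. The address and value are hypothetical.
 *
 *     stl_le_phys(&address_space_memory, 0x3000, 0x1);   // "done" flag
 */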
2695
bellardaab33092005-10-30 20:48:42 +00002696/* XXX: optimize */
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002697void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002698{
2699 uint8_t v = val;
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002700 address_space_rw(as, addr, &v, 1, 1);
bellardaab33092005-10-30 20:48:42 +00002701}
2702
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002703/* warning: addr must be aligned */
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002704static inline void stw_phys_internal(AddressSpace *as,
2705 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002706 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002707{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002708 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002709 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002710 hwaddr l = 2;
2711 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002712
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002713 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002714 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002715#if defined(TARGET_WORDS_BIGENDIAN)
2716 if (endian == DEVICE_LITTLE_ENDIAN) {
2717 val = bswap16(val);
2718 }
2719#else
2720 if (endian == DEVICE_BIG_ENDIAN) {
2721 val = bswap16(val);
2722 }
2723#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002724 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002725 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002726 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002727 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002728 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002729 switch (endian) {
2730 case DEVICE_LITTLE_ENDIAN:
2731 stw_le_p(ptr, val);
2732 break;
2733 case DEVICE_BIG_ENDIAN:
2734 stw_be_p(ptr, val);
2735 break;
2736 default:
2737 stw_p(ptr, val);
2738 break;
2739 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002740 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002741 }
bellardaab33092005-10-30 20:48:42 +00002742}
2743
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002744void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002745{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002746 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002747}
2748
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002749void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002750{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002751 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002752}
2753
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002754void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002755{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002756 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002757}
2758
bellardaab33092005-10-30 20:48:42 +00002759/* XXX: optimize */
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002760void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002761{
2762 val = tswap64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002763 address_space_rw(as, addr, (void *) &val, 8, 1);
bellardaab33092005-10-30 20:48:42 +00002764}
2765
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002766void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002767{
2768 val = cpu_to_le64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002769 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002770}
2771
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002772void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002773{
2774 val = cpu_to_be64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002775 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002776}
2777
aliguori5e2972f2009-03-28 17:51:36 +00002778/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002779int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002780 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002781{
2782 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002783 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002784 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002785
2786 while (len > 0) {
2787 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002788 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002789 /* if no physical page mapped, return an error */
2790 if (phys_addr == -1)
2791 return -1;
2792 l = (page + TARGET_PAGE_SIZE) - addr;
2793 if (l > len)
2794 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002795 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10002796 if (is_write) {
2797 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2798 } else {
2799 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2800 }
bellard13eb76e2004-01-24 15:23:36 +00002801 len -= l;
2802 buf += l;
2803 addr += l;
2804 }
2805 return 0;
2806}
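
/*
 * Illustrative sketch (editor's addition): this routine backs debugger
 * accesses such as the gdb stub's memory reads; it walks the range one
 * page at a time and uses the ROM-writing path for writes. The CPU
 * pointer, address and length are hypothetical.
 *
 *     uint8_t word[4];
 *     if (cpu_memory_rw_debug(cpu, 0x80001000, word, sizeof(word), 0) < 0) {
 *         // no physical page is mapped at that virtual address
 *     }
 */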
Paul Brooka68fe892010-03-01 00:08:59 +00002807#endif
bellard13eb76e2004-01-24 15:23:36 +00002808
Blue Swirl8e4a4242013-01-06 18:30:17 +00002809/*
2810 * A helper function for the _utterly broken_ virtio device model to find out if
2811 * it's running on a big endian machine. Don't do this at home kids!
2812 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02002813bool target_words_bigendian(void);
2814bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00002815{
2816#if defined(TARGET_WORDS_BIGENDIAN)
2817 return true;
2818#else
2819 return false;
2820#endif
2821}
2822
Wen Congyang76f35532012-05-07 12:04:18 +08002823#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002824bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002825{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002826    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002827 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002828
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002829 mr = address_space_translate(&address_space_memory,
2830 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002831
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002832 return !(memory_region_is_ram(mr) ||
2833 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002834}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002835
2836void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2837{
2838 RAMBlock *block;
2839
2840 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2841 func(block->host, block->offset, block->length, opaque);
2842 }
2843}
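
/*
 * Illustrative sketch (editor's addition): callers walk every RAM block
 * with a callback; the signature below is inferred from the invocation
 * above, and the running total is a hypothetical use.
 *
 *     static void count_block(void *host, ram_addr_t offset,
 *                             ram_addr_t length, void *opaque)
 *     {
 *         uint64_t *total = opaque;
 *
 *         *total += length;
 *     }
 *
 *     uint64_t total_ram = 0;
 *     qemu_ram_foreach_block(count_block, &total_ram);
 */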
Peter Maydellec3f8c92013-06-27 20:53:38 +01002844#endif