/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
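/* For example, with ADDR_SPACE_BITS = 64 and a 12-bit TARGET_PAGE_BITS this
 * works out to (64 - 12 - 1) / 9 + 1 = 6 levels, so a lookup walks at most
 * six Nodes of P_L2_SIZE entries each before reaching a MemoryRegionSection.
 */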

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
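
/* index and nb are in units of target pages; e.g. register_multipage() below
 * effectively calls
 *     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
 * so that every page of a section points at the same phys_sections[] slot.
 */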

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
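
/* The walk above consumes lp.skip levels per step: an entry compacted by
 * phys_page_compact() jumps over the intermediate Nodes it replaced, and
 * skip == 0 marks a leaf whose ptr indexes the sections array.  The final
 * range check returns the unassigned section when addr is not actually
 * covered by the section stored in the leaf.
 */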

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
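
/* Typical call pattern (sketch): callers such as address_space_rw() keep the
 * whole translate-and-access sequence inside an RCU critical section, e.g.
 *
 *     rcu_read_lock();
 *     l = len;
 *     mr = address_space_translate(as, addr, &xlat, &l, is_write);
 *     ... access at most l bytes of mr starting at offset xlat ...
 *     rcu_read_unlock();
 */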

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
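
/* A typical caller is the gdbstub, which plants and removes watchpoints on
 * behalf of the remote debugger, e.g. (sketch):
 *
 *     cpu_watchpoint_insert(cpu, addr, len, BP_MEM_WRITE | BP_GDB, NULL);
 */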

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
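
/* For example, a watchpoint at vaddr 0x1000 with len 4 (last byte 0x1003)
 * matches an access at addr 0x1002 with len 8 (last byte 0x1009): neither
 * range starts beyond the end of the other, so the two ranges overlap.
 */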

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
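
/* client selects one of the per-purpose dirty bitmaps (DIRTY_MEMORY_VGA,
 * DIRTY_MEMORY_CODE, DIRTY_MEMORY_MIGRATION); e.g. the display code polls
 * its bitmap to redraw only the pages the guest has touched since the
 * previous scan.
 */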

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
Igor Mammedova2b257d2014-10-31 16:38:37 +00001017void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +02001018{
1019 phys_mem_alloc = alloc;
1020}
1021
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001022static uint16_t phys_section_add(PhysPageMap *map,
1023 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001024{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001025 /* The physical section number is ORed with a page-aligned
1026 * pointer to produce the iotlb entries. Thus it should
1027 * never overflow into the page-aligned value.
1028 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001029 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001030
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001031 if (map->sections_nb == map->sections_nb_alloc) {
1032 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1033 map->sections = g_renew(MemoryRegionSection, map->sections,
1034 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001035 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001036 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001037 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001038 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001039}
1040
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001041static void phys_section_destroy(MemoryRegion *mr)
1042{
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001043 memory_region_unref(mr);
1044
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001045 if (mr->subpage) {
1046 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001047 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001048 g_free(subpage);
1049 }
1050}
1051
Paolo Bonzini60926662013-05-29 12:30:26 +02001052static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001053{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001054 while (map->sections_nb > 0) {
1055 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001056 phys_section_destroy(section->mr);
1057 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001058 g_free(map->sections);
1059 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001060}
1061
Avi Kivityac1970f2012-10-03 16:22:53 +02001062static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001063{
1064 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001065 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001066 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001067 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001068 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001069 MemoryRegionSection subsection = {
1070 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001071 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001072 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001073 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001074
Avi Kivityf3705d52012-03-08 16:16:34 +02001075 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001076
Avi Kivityf3705d52012-03-08 16:16:34 +02001077 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001078 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001079 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001080 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001081 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001082 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001083 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001084 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001085 }
1086 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001087 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001088 subpage_register(subpage, start, end,
1089 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001090}
1091
1092
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001093static void register_multipage(AddressSpaceDispatch *d,
1094 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001095{
Avi Kivitya8170e52012-10-23 12:30:10 +02001096 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001097 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001098 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1099 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001100
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001101 assert(num_pages);
1102 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001103}
1104
Avi Kivityac1970f2012-10-03 16:22:53 +02001105static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001106{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001107 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001108 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001109 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001110 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001111
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001112 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1113 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1114 - now.offset_within_address_space;
1115
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001116 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001117 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001118 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001119 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001120 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001121 while (int128_ne(remain.size, now.size)) {
1122 remain.size = int128_sub(remain.size, now.size);
1123 remain.offset_within_address_space += int128_get64(now.size);
1124 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001125 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001126 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001127 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001128 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001129 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001130 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001131 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001132 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001133 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001134 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001135 }
1136}
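/*
 * Illustrative walk-through of the splitting above (an editorial sketch,
 * not part of the original source; assumes TARGET_PAGE_SIZE == 0x1000):
 * a section at offset 0x1800 with size 0x3000 is registered as
 *
 *     subpage   [0x1800, 0x2000)   - unaligned head, 0x800 bytes
 *     multipage [0x2000, 0x4000)   - the page-aligned middle
 *     subpage   [0x4000, 0x4800)   - unaligned tail, 0x800 bytes
 *
 * so only the partially covered pages pay the subpage_t indirection.
 */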
1137
Sheng Yang62a27442010-01-26 19:21:16 +08001138void qemu_flush_coalesced_mmio_buffer(void)
1139{
1140 if (kvm_enabled())
1141 kvm_flush_coalesced_mmio_buffer();
1142}
1143
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001144void qemu_mutex_lock_ramlist(void)
1145{
1146 qemu_mutex_lock(&ram_list.mutex);
1147}
1148
1149void qemu_mutex_unlock_ramlist(void)
1150{
1151 qemu_mutex_unlock(&ram_list.mutex);
1152}
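/*
 * Editorial usage sketch (not part of the original source): writers pair
 * these helpers around any mutation of the block list, e.g.
 *
 *     qemu_mutex_lock_ramlist();
 *     ... insert or remove a RAMBlock and bump ram_list.version ...
 *     qemu_mutex_unlock_ramlist();
 *
 * while pure readers traverse the list under RCU instead, as the
 * QLIST_FOREACH_RCU() loops below do.
 */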
1153
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001154#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001155
1156#include <sys/vfs.h>
1157
1158#define HUGETLBFS_MAGIC 0x958458f6
1159
Hu Taofc7a5802014-09-09 13:28:01 +08001160static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001161{
1162 struct statfs fs;
1163 int ret;
1164
1165 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001166 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001167 } while (ret != 0 && errno == EINTR);
1168
1169 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001170 error_setg_errno(errp, errno, "failed to get page size of file %s",
1171 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001172 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001173 }
1174
1175 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001176 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001177
1178 return fs.f_bsize;
1179}
1180
Alex Williamson04b16652010-07-02 11:13:17 -06001181static void *file_ram_alloc(RAMBlock *block,
1182 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001183 const char *path,
1184 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001185{
1186 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001187 char *sanitized_name;
1188 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001189 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001190 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001191 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001192 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001193
Hu Taofc7a5802014-09-09 13:28:01 +08001194 hpagesize = gethugepagesize(path, &local_err);
1195 if (local_err) {
1196 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001197 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001198 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001199 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001200
1201 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001202 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1203 "or larger than huge page size 0x%" PRIx64,
1204 memory, hpagesize);
1205 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001206 }
1207
1208 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001209 error_setg(errp,
1210 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001211 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001212 }
1213
Peter Feiner8ca761f2013-03-04 13:54:25 -05001214 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001215 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001216 for (c = sanitized_name; *c != '\0'; c++) {
1217 if (*c == '/')
1218 *c = '_';
1219 }
1220
1221 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1222 sanitized_name);
1223 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001224
1225 fd = mkstemp(filename);
1226 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001227 error_setg_errno(errp, errno,
1228 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001229 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001230 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231 }
1232 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001233 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001234
Chen Hanxiao9284f312015-07-24 11:12:03 +08001235 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236
1237 /*
1238 * ftruncate is not supported by hugetlbfs in older
1239 * hosts, so don't bother bailing out on errors.
1240 * If anything goes wrong with it under other filesystems,
1241 * mmap will fail.
1242 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001243 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001244 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001245 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001246
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001247 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1248 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1249 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001250 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001251 error_setg_errno(errp, errno,
1252 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001253 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001254 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001255 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001256
1257 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001258 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001259 }
1260
Alex Williamson04b16652010-07-02 11:13:17 -06001261 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001262 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001263
1264error:
1265 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001266 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001267 exit(1);
1268 }
1269 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001270}
1271#endif
1272
Mike Day0dc3f442013-09-05 14:41:35 -04001273/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001274static ram_addr_t find_ram_offset(ram_addr_t size)
1275{
Alex Williamson04b16652010-07-02 11:13:17 -06001276 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001277 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001278
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001279 assert(size != 0); /* it would hand out the same offset multiple times */
1280
Mike Day0dc3f442013-09-05 14:41:35 -04001281 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001282 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001283 }
Alex Williamson04b16652010-07-02 11:13:17 -06001284
Mike Day0dc3f442013-09-05 14:41:35 -04001285 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001286 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001287
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001288 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001289
Mike Day0dc3f442013-09-05 14:41:35 -04001290 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001291 if (next_block->offset >= end) {
1292 next = MIN(next, next_block->offset);
1293 }
1294 }
1295 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001296 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001297 mingap = next - end;
1298 }
1299 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001300
1301 if (offset == RAM_ADDR_MAX) {
1302 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1303 (uint64_t)size);
1304 abort();
1305 }
1306
Alex Williamson04b16652010-07-02 11:13:17 -06001307 return offset;
1308}
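/*
 * Editorial example (not part of the original source): with existing blocks
 * at [0, 0x100000) and [0x300000, 0x400000), a request for 0x80000 bytes
 * considers the gap [0x100000, 0x300000) (0x200000 bytes) and the open
 * space after 0x400000; the finite gap is the smaller one that still fits,
 * so the function returns 0x100000.
 */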
1309
Juan Quintela652d7ec2012-07-20 10:37:54 +02001310ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001311{
Alex Williamsond17b5282010-06-25 11:08:38 -06001312 RAMBlock *block;
1313 ram_addr_t last = 0;
1314
Mike Day0dc3f442013-09-05 14:41:35 -04001315 rcu_read_lock();
1316 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001317 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001318 }
Mike Day0dc3f442013-09-05 14:41:35 -04001319 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001320 return last;
1321}
1322
Jason Baronddb97f12012-08-02 15:44:16 -04001323static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1324{
1325 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001326
1327 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001328 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001329 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1330 if (ret) {
1331 perror("qemu_madvise");
1332 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1333 "but dump_guest_core=off specified\n");
1334 }
1335 }
1336}
1337
Mike Day0dc3f442013-09-05 14:41:35 -04001338/* Called within an RCU critical section, or while the ramlist lock
1339 * is held.
1340 */
Hu Tao20cfe882014-04-02 15:13:26 +08001341static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001342{
Hu Tao20cfe882014-04-02 15:13:26 +08001343 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001344
Mike Day0dc3f442013-09-05 14:41:35 -04001345 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001346 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001347 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001348 }
1349 }
Hu Tao20cfe882014-04-02 15:13:26 +08001350
1351 return NULL;
1352}
1353
Mike Dayae3a7042013-09-05 14:41:35 -04001354/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001355void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1356{
Mike Dayae3a7042013-09-05 14:41:35 -04001357 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001358
Mike Day0dc3f442013-09-05 14:41:35 -04001359 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001360 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001361 assert(new_block);
1362 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001363
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001364 if (dev) {
1365 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001366 if (id) {
1367 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001368 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001369 }
1370 }
1371 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1372
Mike Day0dc3f442013-09-05 14:41:35 -04001373 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001374 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001375 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1376 new_block->idstr);
1377 abort();
1378 }
1379 }
Mike Day0dc3f442013-09-05 14:41:35 -04001380 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001381}
1382
Mike Dayae3a7042013-09-05 14:41:35 -04001383/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001384void qemu_ram_unset_idstr(ram_addr_t addr)
1385{
Mike Dayae3a7042013-09-05 14:41:35 -04001386 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001387
Mike Dayae3a7042013-09-05 14:41:35 -04001388 /* FIXME: arch_init.c assumes that this is not called throughout
1389 * migration. Ignore the problem since hot-unplug during migration
1390 * does not work anyway.
1391 */
1392
Mike Day0dc3f442013-09-05 14:41:35 -04001393 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001394 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001395 if (block) {
1396 memset(block->idstr, 0, sizeof(block->idstr));
1397 }
Mike Day0dc3f442013-09-05 14:41:35 -04001398 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001399}
1400
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001401static int memory_try_enable_merging(void *addr, size_t len)
1402{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001403 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001404 /* disabled by the user */
1405 return 0;
1406 }
1407
1408 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1409}
1410
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001411/* Only legal before guest might have detected the memory size: e.g. on
1412 * incoming migration, or right after reset.
1413 *
1414 * As the memory core doesn't know how the memory is accessed, it is up to the
1415 * resize callback to update device state and/or add assertions to detect
1416 * misuse, if necessary.
1417 */
1418int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1419{
1420 RAMBlock *block = find_ram_block(base);
1421
1422 assert(block);
1423
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001424 newsize = TARGET_PAGE_ALIGN(newsize);
1425
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001426 if (block->used_length == newsize) {
1427 return 0;
1428 }
1429
1430 if (!(block->flags & RAM_RESIZEABLE)) {
1431 error_setg_errno(errp, EINVAL,
1432 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1433 " in != 0x" RAM_ADDR_FMT, block->idstr,
1434 newsize, block->used_length);
1435 return -EINVAL;
1436 }
1437
1438 if (block->max_length < newsize) {
1439 error_setg_errno(errp, EINVAL,
1440 "Length too large: %s: 0x" RAM_ADDR_FMT
1441 " > 0x" RAM_ADDR_FMT, block->idstr,
1442 newsize, block->max_length);
1443 return -EINVAL;
1444 }
1445
1446 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1447 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001448 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1449 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001450 memory_region_set_size(block->mr, newsize);
1451 if (block->resized) {
1452 block->resized(block->idstr, newsize, block->host);
1453 }
1454 return 0;
1455}
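/*
 * Editorial usage sketch (not part of the original source; names are
 * placeholders): a block created with RAM_RESIZEABLE can be grown on
 * incoming migration roughly as
 *
 *     Error *local_err = NULL;
 *     if (qemu_ram_resize(block_offset, incoming_size, &local_err) < 0) {
 *         error_report("%s", error_get_pretty(local_err));
 *     }
 *
 * which also updates the dirty bitmap and the owning MemoryRegion's size.
 */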
1456
Hu Taoef701d72014-09-09 13:27:54 +08001457static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001458{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001459 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001460 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001461 ram_addr_t old_ram_size, new_ram_size;
1462
1463 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001464
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001465 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001466 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001467
1468 if (!new_block->host) {
1469 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001470 xen_ram_alloc(new_block->offset, new_block->max_length,
1471 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001472 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001473 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001474 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001475 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001476 error_setg_errno(errp, errno,
1477 "cannot set up guest memory '%s'",
1478 memory_region_name(new_block->mr));
1479 qemu_mutex_unlock_ramlist();
1480 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001481 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001482 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001483 }
1484 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001485
Li Zhijiandd631692015-07-02 20:18:06 +08001486 new_ram_size = MAX(old_ram_size,
1487 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1488 if (new_ram_size > old_ram_size) {
1489 migration_bitmap_extend(old_ram_size, new_ram_size);
1490 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001491 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1492 * QLIST (which has an RCU-friendly variant) does not have insertion at
1493 * tail, so save the last element in last_block.
1494 */
Mike Day0dc3f442013-09-05 14:41:35 -04001495 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001496 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001497 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001498 break;
1499 }
1500 }
1501 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001502 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001503 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001504 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001505 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001506 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001507 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001508 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001509
Mike Day0dc3f442013-09-05 14:41:35 -04001510 /* Write list before version */
1511 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001512 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001513 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001514
Juan Quintela2152f5c2013-10-08 13:52:02 +02001515 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1516
1517 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001518 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001519
1520 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001521 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1522 ram_list.dirty_memory[i] =
1523 bitmap_zero_extend(ram_list.dirty_memory[i],
1524 old_ram_size, new_ram_size);
1525 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001526 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001527 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001528 new_block->used_length,
1529 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001530
Paolo Bonzinia904c912015-01-21 16:18:35 +01001531 if (new_block->host) {
1532 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1533 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1534 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1535 if (kvm_enabled()) {
1536 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1537 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001538 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001539
1540 return new_block->offset;
1541}
1542
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001543#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001544ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001545 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001546 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001547{
1548 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001549 ram_addr_t addr;
1550 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001551
1552 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001553 error_setg(errp, "-mem-path not supported with Xen");
1554 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001555 }
1556
1557 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1558 /*
1559 * file_ram_alloc() needs to allocate just like
1560 * phys_mem_alloc, but we haven't bothered to provide
1561 * a hook there.
1562 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001563 error_setg(errp,
1564 "-mem-path not supported with this accelerator");
1565 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001566 }
1567
1568 size = TARGET_PAGE_ALIGN(size);
1569 new_block = g_malloc0(sizeof(*new_block));
1570 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001571 new_block->used_length = size;
1572 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001573 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001574 new_block->host = file_ram_alloc(new_block, size,
1575 mem_path, errp);
1576 if (!new_block->host) {
1577 g_free(new_block);
1578 return -1;
1579 }
1580
Hu Taoef701d72014-09-09 13:27:54 +08001581 addr = ram_block_add(new_block, &local_err);
1582 if (local_err) {
1583 g_free(new_block);
1584 error_propagate(errp, local_err);
1585 return -1;
1586 }
1587 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001588}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001589#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001590
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001591static
1592ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1593 void (*resized)(const char*,
1594 uint64_t length,
1595 void *host),
1596 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001597 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001598{
1599 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001600 ram_addr_t addr;
1601 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001602
1603 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001604 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001605 new_block = g_malloc0(sizeof(*new_block));
1606 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001607 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001608 new_block->used_length = size;
1609 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001610 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001611 new_block->fd = -1;
1612 new_block->host = host;
1613 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001614 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001615 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001616 if (resizeable) {
1617 new_block->flags |= RAM_RESIZEABLE;
1618 }
Hu Taoef701d72014-09-09 13:27:54 +08001619 addr = ram_block_add(new_block, &local_err);
1620 if (local_err) {
1621 g_free(new_block);
1622 error_propagate(errp, local_err);
1623 return -1;
1624 }
1625 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001626}
1627
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001628ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1629 MemoryRegion *mr, Error **errp)
1630{
1631 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1632}
1633
Hu Taoef701d72014-09-09 13:27:54 +08001634ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001635{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001636 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1637}
1638
1639ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1640 void (*resized)(const char*,
1641 uint64_t length,
1642 void *host),
1643 MemoryRegion *mr, Error **errp)
1644{
1645 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001646}
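/*
 * Editorial summary (not part of the original source) of the wrappers above:
 *
 *     qemu_ram_alloc()            - anonymous host memory, fixed size
 *     qemu_ram_alloc_from_ptr()   - wrap memory the caller already owns
 *     qemu_ram_alloc_resizeable() - may grow up to maxsz via qemu_ram_resize()
 *     qemu_ram_alloc_from_file()  - file/hugetlbfs backed (Linux only)
 *
 * All of them end up in ram_block_add() and return the new block's offset
 * in the ram_addr_t address space.
 */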
bellarde9a1ab12007-02-08 23:08:38 +00001647
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001648void qemu_ram_free_from_ptr(ram_addr_t addr)
1649{
1650 RAMBlock *block;
1651
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001652 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001653 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001654 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001655 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001656 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001657 /* Write list before version */
1658 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001659 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001660 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001661 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001662 }
1663 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001664 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001665}
1666
Paolo Bonzini43771532013-09-09 17:58:40 +02001667static void reclaim_ramblock(RAMBlock *block)
1668{
1669 if (block->flags & RAM_PREALLOC) {
1670 ;
1671 } else if (xen_enabled()) {
1672 xen_invalidate_map_cache_entry(block->host);
1673#ifndef _WIN32
1674 } else if (block->fd >= 0) {
1675 munmap(block->host, block->max_length);
1676 close(block->fd);
1677#endif
1678 } else {
1679 qemu_anon_ram_free(block->host, block->max_length);
1680 }
1681 g_free(block);
1682}
1683
Anthony Liguoric227f092009-10-01 16:12:16 -05001684void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001685{
Alex Williamson04b16652010-07-02 11:13:17 -06001686 RAMBlock *block;
1687
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001688 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001689 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001690 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001691 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001692 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001693 /* Write list before version */
1694 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001695 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001696 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001697 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001698 }
1699 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001700 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001701}
1702
Huang Yingcd19cfa2011-03-02 08:56:19 +01001703#ifndef _WIN32
1704void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1705{
1706 RAMBlock *block;
1707 ram_addr_t offset;
1708 int flags;
1709 void *area, *vaddr;
1710
Mike Day0dc3f442013-09-05 14:41:35 -04001711 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001712 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001713 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001714 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001715 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001716 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001717 } else if (xen_enabled()) {
1718 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001719 } else {
1720 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001721 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001722 flags |= (block->flags & RAM_SHARED ?
1723 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001724 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1725 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001726 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001727 /*
1728 * Remap needs to match alloc. Accelerators that
1729 * set phys_mem_alloc never remap. If they did,
1730 * we'd need a remap hook here.
1731 */
1732 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1733
Huang Yingcd19cfa2011-03-02 08:56:19 +01001734 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1735 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1736 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001737 }
1738 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001739 fprintf(stderr, "Could not remap addr: "
1740 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001741 length, addr);
1742 exit(1);
1743 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001744 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001745 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001746 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001747 }
1748 }
1749}
1750#endif /* !_WIN32 */
1751
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001752int qemu_get_ram_fd(ram_addr_t addr)
1753{
Mike Dayae3a7042013-09-05 14:41:35 -04001754 RAMBlock *block;
1755 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001756
Mike Day0dc3f442013-09-05 14:41:35 -04001757 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001758 block = qemu_get_ram_block(addr);
1759 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001760 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001761 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001762}
1763
Damjan Marion3fd74b82014-06-26 23:01:32 +02001764void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1765{
Mike Dayae3a7042013-09-05 14:41:35 -04001766 RAMBlock *block;
1767 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001768
Mike Day0dc3f442013-09-05 14:41:35 -04001769 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001770 block = qemu_get_ram_block(addr);
1771 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001772 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001773 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001774}
1775
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001776/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001777 * This should not be used for general purpose DMA. Use address_space_map
1778 * or address_space_rw instead. For local memory (e.g. video ram) that the
1779 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001780 *
1781 * By the time this function returns, the returned pointer is not protected
1782 * by RCU anymore. If the caller is not within an RCU critical section and
1783 * does not hold the iothread lock, it must have other means of protecting the
1784 * pointer, such as a reference to the region that includes the incoming
1785 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001786 */
1787void *qemu_get_ram_ptr(ram_addr_t addr)
1788{
Mike Dayae3a7042013-09-05 14:41:35 -04001789 RAMBlock *block;
1790 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001791
Mike Day0dc3f442013-09-05 14:41:35 -04001792 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001793 block = qemu_get_ram_block(addr);
1794
1795 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001796 /* We need to check if the requested address is in the RAM
1797 * because we don't want to map the entire memory in QEMU.
1798 * In that case just map until the end of the page.
1799 */
1800 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001801 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001802 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001803 }
Mike Dayae3a7042013-09-05 14:41:35 -04001804
1805 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001806 }
Mike Dayae3a7042013-09-05 14:41:35 -04001807 ptr = ramblock_ptr(block, addr - block->offset);
1808
Mike Day0dc3f442013-09-05 14:41:35 -04001809unlock:
1810 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001811 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001812}
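/*
 * Editorial sketch (not part of the original source): a caller outside an
 * RCU critical section that does not hold the iothread lock keeps the
 * pointer valid by holding a reference on the owning region, e.g.
 *
 *     memory_region_ref(mr);
 *     void *host = qemu_get_ram_ptr(addr);
 *     ... access host ...
 *     memory_region_unref(mr);
 *
 * matching the protection requirement described in the comment above.
 */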
1813
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001814/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001815 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001816 *
1817 * By the time this function returns, the returned pointer is not protected
1818 * by RCU anymore. If the caller is not within an RCU critical section and
1819 * does not hold the iothread lock, it must have other means of protecting the
1820 * pointer, such as a reference to the region that includes the incoming
1821 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001822 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001823static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001824{
Mike Dayae3a7042013-09-05 14:41:35 -04001825 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001826 if (*size == 0) {
1827 return NULL;
1828 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001829 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001830 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001831 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001832 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001833 rcu_read_lock();
1834 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001835 if (addr - block->offset < block->max_length) {
1836 if (addr - block->offset + *size > block->max_length)
1837 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001838 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001839 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001840 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001841 }
1842 }
1843
1844 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1845 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001846 }
1847}
1848
Paolo Bonzini7443b432013-06-03 12:44:02 +02001849/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001850 * (typically a TLB entry) back to a ram offset.
1851 *
1852 * By the time this function returns, the returned pointer is not protected
1853 * by RCU anymore. If the caller is not within an RCU critical section and
1854 * does not hold the iothread lock, it must have other means of protecting the
1855 * pointer, such as a reference to the region that includes the incoming
1856 * ram_addr_t.
1857 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001858MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001859{
pbrook94a6b542009-04-11 17:15:54 +00001860 RAMBlock *block;
1861 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001862 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001863
Jan Kiszka868bb332011-06-21 22:59:09 +02001864 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001865 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001866 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001867 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001868 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001869 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001870 }
1871
Mike Day0dc3f442013-09-05 14:41:35 -04001872 rcu_read_lock();
1873 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001874 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001875 goto found;
1876 }
1877
Mike Day0dc3f442013-09-05 14:41:35 -04001878 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001879 /* This case appears when the block is not mapped. */
1880 if (block->host == NULL) {
1881 continue;
1882 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001883 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001884 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001885 }
pbrook94a6b542009-04-11 17:15:54 +00001886 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001887
Mike Day0dc3f442013-09-05 14:41:35 -04001888 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001889 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001890
1891found:
1892 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001893 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001894 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001895 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001896}
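/*
 * Editorial round-trip example (not part of the original source; non-Xen
 * case): translating a host pointer back and forth is consistent, i.e.
 *
 *     ram_addr_t ram_addr;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(host, &ram_addr);
 *     if (mr) {
 *         assert(qemu_get_ram_ptr(ram_addr) == host);
 *     }
 *
 * because ram_addr is computed as block->offset + (host - block->host).
 */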
Alex Williamsonf471a172010-06-11 11:11:42 -06001897
Avi Kivitya8170e52012-10-23 12:30:10 +02001898static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001899 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001900{
Juan Quintela52159192013-10-08 12:44:04 +02001901 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001902 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001903 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001904 switch (size) {
1905 case 1:
1906 stb_p(qemu_get_ram_ptr(ram_addr), val);
1907 break;
1908 case 2:
1909 stw_p(qemu_get_ram_ptr(ram_addr), val);
1910 break;
1911 case 4:
1912 stl_p(qemu_get_ram_ptr(ram_addr), val);
1913 break;
1914 default:
1915 abort();
1916 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001917 /* Set both VGA and migration bits for simplicity and to remove
1918 * the notdirty callback faster.
1919 */
1920 cpu_physical_memory_set_dirty_range(ram_addr, size,
1921 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001922 /* we remove the notdirty callback only if the code has been
1923 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001924 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001925 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001926 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001927 }
bellard1ccde1c2004-02-06 19:46:14 +00001928}
1929
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001930static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1931 unsigned size, bool is_write)
1932{
1933 return is_write;
1934}
1935
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001936static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001937 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001938 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001939 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001940};
1941
pbrook0f459d12008-06-09 00:20:13 +00001942/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001943static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001944{
Andreas Färber93afead2013-08-26 03:41:01 +02001945 CPUState *cpu = current_cpu;
1946 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001947 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001948 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001949 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001950 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001951
Andreas Färberff4700b2013-08-26 18:23:18 +02001952 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001953 /* We re-entered the check after replacing the TB. Now raise
1954 * the debug interrupt so that it will trigger after the
1955 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001956 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001957 return;
1958 }
Andreas Färber93afead2013-08-26 03:41:01 +02001959 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001960 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001961 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1962 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001963 if (flags == BP_MEM_READ) {
1964 wp->flags |= BP_WATCHPOINT_HIT_READ;
1965 } else {
1966 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1967 }
1968 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001969 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001970 if (!cpu->watchpoint_hit) {
1971 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001972 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001973 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001974 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001975 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001976 } else {
1977 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001978 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001979 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001980 }
aliguori06d55cc2008-11-18 20:24:06 +00001981 }
aliguori6e140f22008-11-18 20:37:55 +00001982 } else {
1983 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001984 }
1985 }
1986}
1987
pbrook6658ffb2007-03-16 23:58:11 +00001988/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1989 so these check for a hit then pass through to the normal out-of-line
1990 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001991static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1992 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001993{
Peter Maydell66b9b432015-04-26 16:49:24 +01001994 MemTxResult res;
1995 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001996
Peter Maydell66b9b432015-04-26 16:49:24 +01001997 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001998 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001999 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002000 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002001 break;
2002 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002003 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002004 break;
2005 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002006 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002007 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002008 default: abort();
2009 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002010 *pdata = data;
2011 return res;
2012}
2013
2014static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2015 uint64_t val, unsigned size,
2016 MemTxAttrs attrs)
2017{
2018 MemTxResult res;
2019
2020 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2021 switch (size) {
2022 case 1:
2023 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2024 break;
2025 case 2:
2026 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2027 break;
2028 case 4:
2029 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2030 break;
2031 default: abort();
2032 }
2033 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002034}
2035
Avi Kivity1ec9b902012-01-02 12:47:48 +02002036static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002037 .read_with_attrs = watch_mem_read,
2038 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002039 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002040};
pbrook6658ffb2007-03-16 23:58:11 +00002041
Peter Maydellf25a49e2015-04-26 16:49:24 +01002042static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2043 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002044{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002045 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002046 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002047 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002048
blueswir1db7b5422007-05-26 17:36:03 +00002049#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002050 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002051 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002052#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002053 res = address_space_read(subpage->as, addr + subpage->base,
2054 attrs, buf, len);
2055 if (res) {
2056 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002057 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002058 switch (len) {
2059 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002060 *data = ldub_p(buf);
2061 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002062 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002063 *data = lduw_p(buf);
2064 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002065 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002066 *data = ldl_p(buf);
2067 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002068 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002069 *data = ldq_p(buf);
2070 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002071 default:
2072 abort();
2073 }
blueswir1db7b5422007-05-26 17:36:03 +00002074}
2075
Peter Maydellf25a49e2015-04-26 16:49:24 +01002076static MemTxResult subpage_write(void *opaque, hwaddr addr,
2077 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002078{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002079 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002080 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002081
blueswir1db7b5422007-05-26 17:36:03 +00002082#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002083 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002084 " value %"PRIx64"\n",
2085 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002086#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002087 switch (len) {
2088 case 1:
2089 stb_p(buf, value);
2090 break;
2091 case 2:
2092 stw_p(buf, value);
2093 break;
2094 case 4:
2095 stl_p(buf, value);
2096 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002097 case 8:
2098 stq_p(buf, value);
2099 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002100 default:
2101 abort();
2102 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002103 return address_space_write(subpage->as, addr + subpage->base,
2104 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002105}
2106
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002107static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002108 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002109{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002110 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002111#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002112 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002113 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002114#endif
2115
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002116 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002117 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002118}
2119
Avi Kivity70c68e42012-01-02 12:32:48 +02002120static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002121 .read_with_attrs = subpage_read,
2122 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002123 .impl.min_access_size = 1,
2124 .impl.max_access_size = 8,
2125 .valid.min_access_size = 1,
2126 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002127 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002128 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002129};
2130
Anthony Liguoric227f092009-10-01 16:12:16 -05002131static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002132 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002133{
2134 int idx, eidx;
2135
2136 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2137 return -1;
2138 idx = SUBPAGE_IDX(start);
2139 eidx = SUBPAGE_IDX(end);
2140#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002141 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2142 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002143#endif
blueswir1db7b5422007-05-26 17:36:03 +00002144 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002145 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002146 }
2147
2148 return 0;
2149}
2150
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002151static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002152{
Anthony Liguoric227f092009-10-01 16:12:16 -05002153 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002154
Anthony Liguori7267c092011-08-20 22:09:37 -05002155 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002156
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002157 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002158 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002159 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002160 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002161 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002162#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002163 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2164 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002165#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002166 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002167
2168 return mmio;
2169}
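/*
 * Editorial example (not part of the original source; assumes 4 KiB target
 * pages): a 0x200-byte MMIO region at offset 0x100 inside a page first gets
 * a subpage_t for its page via subpage_init(), with every sub_section[]
 * entry pointing at PHYS_SECTION_UNASSIGNED, and then subpage_register()
 * redirects the byte range [0x100, 0x2ff] to the device's section, so
 * accesses outside that window are still routed to the unassigned section.
 */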
2170
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002171static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2172 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002173{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002174 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002175 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002176 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002177 .mr = mr,
2178 .offset_within_address_space = 0,
2179 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002180 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002181 };
2182
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002183 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002184}
2185
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002186MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002187{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002188 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2189 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002190
2191 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002192}
2193
Avi Kivitye9179ce2009-06-14 11:38:52 +03002194static void io_mem_init(void)
2195{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002196 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002197 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002198 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002199 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002200 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002201 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002202 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002203}
2204
Avi Kivityac1970f2012-10-03 16:22:53 +02002205static void mem_begin(MemoryListener *listener)
2206{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002207 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002208 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2209 uint16_t n;
2210
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002211 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002212 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002213 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002214 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002215 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002216 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002217 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002218 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002219
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002220 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002221 d->as = as;
2222 as->next_dispatch = d;
2223}
2224
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002225static void address_space_dispatch_free(AddressSpaceDispatch *d)
2226{
2227 phys_sections_free(&d->map);
2228 g_free(d);
2229}
2230
Paolo Bonzini00752702013-05-29 12:13:54 +02002231static void mem_commit(MemoryListener *listener)
2232{
2233 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002234 AddressSpaceDispatch *cur = as->dispatch;
2235 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002236
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002237 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002238
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002239 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002240 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002241 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002242 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002243}
2244
Avi Kivity1d711482012-10-02 18:54:45 +02002245static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002246{
Andreas Färber182735e2013-05-29 22:29:20 +02002247 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002248
2249 /* since each CPU stores ram addresses in its TLB cache, we must
2250 reset the modified entries */
2251 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002252 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002253 /* FIXME: Disentangle the cpu.h circular file deps so we can
2254 directly get the right CPU from listener. */
2255 if (cpu->tcg_as_listener != listener) {
2256 continue;
2257 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002258 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002259 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002260}
2261
Avi Kivityac1970f2012-10-03 16:22:53 +02002262void address_space_init_dispatch(AddressSpace *as)
2263{
Paolo Bonzini00752702013-05-29 12:13:54 +02002264 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002265 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002266 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002267 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002268 .region_add = mem_add,
2269 .region_nop = mem_add,
2270 .priority = 0,
2271 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002272 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002273}
2274
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002275void address_space_unregister(AddressSpace *as)
2276{
2277 memory_listener_unregister(&as->dispatch_listener);
2278}
2279
Avi Kivity83f3c252012-10-07 12:59:55 +02002280void address_space_destroy_dispatch(AddressSpace *as)
2281{
2282 AddressSpaceDispatch *d = as->dispatch;
2283
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002284 atomic_rcu_set(&as->dispatch, NULL);
2285 if (d) {
2286 call_rcu(d, address_space_dispatch_free, rcu);
2287 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002288}
2289
Avi Kivity62152b82011-07-26 14:26:14 +03002290static void memory_map_init(void)
2291{
Anthony Liguori7267c092011-08-20 22:09:37 -05002292 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002293
Paolo Bonzini57271d62013-11-07 17:14:37 +01002294 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002295 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002296
Anthony Liguori7267c092011-08-20 22:09:37 -05002297 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002298 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2299 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002300 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002301}
2302
2303MemoryRegion *get_system_memory(void)
2304{
2305 return system_memory;
2306}
2307
Avi Kivity309cb472011-08-08 16:09:03 +03002308MemoryRegion *get_system_io(void)
2309{
2310 return system_io;
2311}
2312
pbrooke2eef172008-06-08 01:09:01 +00002313#endif /* !defined(CONFIG_USER_ONLY) */
2314
bellard13eb76e2004-01-24 15:23:36 +00002315/* physical memory access (slow version, mainly for debug) */
2316#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002317int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002318 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002319{
2320 int l, flags;
2321 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002322 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002323
2324 while (len > 0) {
2325 page = addr & TARGET_PAGE_MASK;
2326 l = (page + TARGET_PAGE_SIZE) - addr;
2327 if (l > len)
2328 l = len;
2329 flags = page_get_flags(page);
2330 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002331 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002332 if (is_write) {
2333 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002334 return -1;
bellard579a97f2007-11-11 14:26:47 +00002335 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002336 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002337 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002338 memcpy(p, buf, l);
2339 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002340 } else {
2341 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002342 return -1;
bellard579a97f2007-11-11 14:26:47 +00002343 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002344 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002345 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002346 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002347 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002348 }
2349 len -= l;
2350 buf += l;
2351 addr += l;
2352 }
Paul Brooka68fe892010-03-01 00:08:59 +00002353 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002354}
bellard8df1cd02005-01-28 22:37:22 +00002355
bellard13eb76e2004-01-24 15:23:36 +00002356#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002357
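/* Mark [addr, addr + length) dirty after a direct RAM write: invalidate any
 * translated blocks covering the range if code dirty tracking requires it,
 * then record the remaining dirty bits for clients such as VGA and
 * migration.
 */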
Paolo Bonzini845b6212015-03-23 11:45:53 +01002358static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002359 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002360{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002361 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2362 /* No early return if dirty_log_mask is or becomes 0, because
2363 * cpu_physical_memory_set_dirty_range will still call
2364 * xen_modified_memory.
2365 */
2366 if (dirty_log_mask) {
2367 dirty_log_mask =
2368 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002369 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002370 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2371 tb_invalidate_phys_range(addr, addr + length);
2372 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2373 }
2374 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002375}
2376
Richard Henderson23326162013-07-08 14:55:59 -07002377static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002378{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002379 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002380
2381 /* Regions are assumed to support 1-4 byte accesses unless
2382 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002383 if (access_size_max == 0) {
2384 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002385 }
Richard Henderson23326162013-07-08 14:55:59 -07002386
2387 /* Bound the maximum access by the alignment of the address. */
2388 if (!mr->ops->impl.unaligned) {
2389 unsigned align_size_max = addr & -addr;
2390 if (align_size_max != 0 && align_size_max < access_size_max) {
2391 access_size_max = align_size_max;
2392 }
2393 }
2394
2395 /* Don't attempt accesses larger than the maximum. */
2396 if (l > access_size_max) {
2397 l = access_size_max;
2398 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002399 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002400
2401 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002402}
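
/* Worked example, assuming a region whose ops leave valid.max_access_size at
 * 0 and do not set impl.unaligned: an 8-byte request at address 0x1006 is
 * first capped at the 4-byte default, then bounded by the address alignment
 * (0x1006 & -0x1006 == 2), so a 2-byte access is issued and the caller
 * loops for the remainder.
 */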
2403
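/* Take the iothread lock ("big QEMU lock") for MMIO regions that still rely
 * on global locking, and flush any pending coalesced MMIO before the access.
 * Returns true when the caller must drop the lock again once the MMIO
 * dispatch has completed.
 */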
Jan Kiszka4840f102015-06-18 18:47:22 +02002404static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002405{
Jan Kiszka4840f102015-06-18 18:47:22 +02002406 bool unlocked = !qemu_mutex_iothread_locked();
2407 bool release_lock = false;
2408
2409 if (unlocked && mr->global_locking) {
2410 qemu_mutex_lock_iothread();
2411 unlocked = false;
2412 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002413 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002414 if (mr->flush_coalesced_mmio) {
2415 if (unlocked) {
2416 qemu_mutex_lock_iothread();
2417 }
2418 qemu_flush_coalesced_mmio_buffer();
2419 if (unlocked) {
2420 qemu_mutex_unlock_iothread();
2421 }
2422 }
2423
2424 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002425}
2426
Peter Maydell5c9eb022015-04-26 16:49:24 +01002427MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2428 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002429{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002430 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002431 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002432 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002433 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002434 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002435 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002436 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002437
Paolo Bonzini41063e12015-03-18 14:21:43 +01002438 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002439 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002440 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002441 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002442
bellard13eb76e2004-01-24 15:23:36 +00002443 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002444 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002445 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002446 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002447 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002448 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002449 switch (l) {
2450 case 8:
2451 /* 64 bit write access */
2452 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002453 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2454 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002455 break;
2456 case 4:
bellard1c213d12005-09-03 10:49:04 +00002457 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002458 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002459 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2460 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002461 break;
2462 case 2:
bellard1c213d12005-09-03 10:49:04 +00002463 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002464 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002465 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2466 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002467 break;
2468 case 1:
bellard1c213d12005-09-03 10:49:04 +00002469 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002470 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002471 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2472 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002473 break;
2474 default:
2475 abort();
bellard13eb76e2004-01-24 15:23:36 +00002476 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002477 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002478 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002479 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002480 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002481 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002482 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002483 }
2484 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002485 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002486 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002487 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002488 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002489 switch (l) {
2490 case 8:
2491 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002492 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2493 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002494 stq_p(buf, val);
2495 break;
2496 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002497 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002498 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2499 attrs);
bellardc27004e2005-01-03 23:35:10 +00002500 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002501 break;
2502 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002503 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002504 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2505 attrs);
bellardc27004e2005-01-03 23:35:10 +00002506 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002507 break;
2508 case 1:
bellard1c213d12005-09-03 10:49:04 +00002509 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002510 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2511 attrs);
bellardc27004e2005-01-03 23:35:10 +00002512 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002513 break;
2514 default:
2515 abort();
bellard13eb76e2004-01-24 15:23:36 +00002516 }
2517 } else {
2518 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002519 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002520 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002521 }
2522 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002523
2524 if (release_lock) {
2525 qemu_mutex_unlock_iothread();
2526 release_lock = false;
2527 }
2528
bellard13eb76e2004-01-24 15:23:36 +00002529 len -= l;
2530 buf += l;
2531 addr += l;
2532 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002533 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002534
Peter Maydell3b643492015-04-26 16:49:23 +01002535 return result;
bellard13eb76e2004-01-24 15:23:36 +00002536}
bellard8df1cd02005-01-28 22:37:22 +00002537
Peter Maydell5c9eb022015-04-26 16:49:24 +01002538MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2539 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002540{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002541 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002542}
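
/* A minimal usage sketch (the guest address and value are placeholders, not
 * taken from a real caller): write a 32-bit little-endian word into guest
 * memory and check the transaction result.
 *
 *     uint32_t v = cpu_to_le32(0x12345678);
 *     MemTxResult r = address_space_write(&address_space_memory, 0x1000,
 *                                         MEMTXATTRS_UNSPECIFIED,
 *                                         (uint8_t *)&v, sizeof(v));
 *     if (r != MEMTX_OK) {
 *         error_report("guest memory write failed");
 *     }
 */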
2543
Peter Maydell5c9eb022015-04-26 16:49:24 +01002544MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2545 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002546{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002547 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002548}
2549
2550
Avi Kivitya8170e52012-10-23 12:30:10 +02002551void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002552 int len, int is_write)
2553{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002554 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2555 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002556}
2557
Alexander Graf582b55a2013-12-11 14:17:44 +01002558enum write_rom_type {
2559 WRITE_DATA,
2560 FLUSH_CACHE,
2561};
2562
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002563static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002564 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002565{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002566 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002567 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002568 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002569 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002570
Paolo Bonzini41063e12015-03-18 14:21:43 +01002571 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002572 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002573 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002574 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002575
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002576 if (!(memory_region_is_ram(mr) ||
2577 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002578 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002579 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002580 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002581 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002582 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002583 switch (type) {
2584 case WRITE_DATA:
2585 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002586 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002587 break;
2588 case FLUSH_CACHE:
2589 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2590 break;
2591 }
bellardd0ecd2a2006-04-23 17:14:48 +00002592 }
2593 len -= l;
2594 buf += l;
2595 addr += l;
2596 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002597 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002598}
2599
Alexander Graf582b55a2013-12-11 14:17:44 +01002600/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002601void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002602 const uint8_t *buf, int len)
2603{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002604 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002605}
2606
2607void cpu_flush_icache_range(hwaddr start, int len)
2608{
2609 /*
2610 * This function should do the same thing as an icache flush that was
2611 * triggered from within the guest. For TCG we are always cache coherent,
2612 * so there is no need to flush anything. For KVM / Xen we need to flush
2613 * the host's instruction cache at least.
2614 */
2615 if (tcg_enabled()) {
2616 return;
2617 }
2618
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002619 cpu_physical_memory_write_rom_internal(&address_space_memory,
2620 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002621}
2622
aliguori6d16c2f2009-01-22 16:59:11 +00002623typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002624 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002625 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002626 hwaddr addr;
2627 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002628 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002629} BounceBuffer;
2630
2631static BounceBuffer bounce;
2632
aliguoriba223c22009-01-22 16:59:16 +00002633typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002634 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002635 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002636} MapClient;
2637
Fam Zheng38e047b2015-03-16 17:03:35 +08002638QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002639static QLIST_HEAD(map_client_list, MapClient) map_client_list
2640 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002641
Fam Zhenge95205e2015-03-16 17:03:37 +08002642static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002643{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002644 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002645 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002646}
2647
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002648static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002649{
2650 MapClient *client;
2651
Blue Swirl72cf2d42009-09-12 07:36:22 +00002652 while (!QLIST_EMPTY(&map_client_list)) {
2653 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002654 qemu_bh_schedule(client->bh);
2655 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002656 }
2657}
2658
Fam Zhenge95205e2015-03-16 17:03:37 +08002659void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002660{
2661 MapClient *client = g_malloc(sizeof(*client));
2662
Fam Zheng38e047b2015-03-16 17:03:35 +08002663 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002664 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002665 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002666 if (!atomic_read(&bounce.in_use)) {
2667 cpu_notify_map_clients_locked();
2668 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002669 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002670}
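
/* Sketch of the retry pattern this API enables (the device state "s" and its
 * bottom half are illustrative, not defined here): a caller whose
 * address_space_map() fails because the single bounce buffer is busy
 * registers a QEMUBH; the BH is scheduled, and the client removed from the
 * list, once the buffer is released.
 *
 *     void *p = address_space_map(as, addr, &len, is_write);
 *     if (!p) {
 *         cpu_register_map_client(s->bh);  (s->bh restarts the transfer)
 *         return;
 *     }
 */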
2671
Fam Zheng38e047b2015-03-16 17:03:35 +08002672void cpu_exec_init_all(void)
2673{
2674 qemu_mutex_init(&ram_list.mutex);
2675 memory_map_init();
2676 io_mem_init();
2677 qemu_mutex_init(&map_client_list_lock);
2678}
2679
Fam Zhenge95205e2015-03-16 17:03:37 +08002680void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002681{
Fam Zhenge95205e2015-03-16 17:03:37 +08002682 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002683
Fam Zhenge95205e2015-03-16 17:03:37 +08002684 qemu_mutex_lock(&map_client_list_lock);
2685 QLIST_FOREACH(client, &map_client_list, link) {
2686 if (client->bh == bh) {
2687 cpu_unregister_map_client_do(client);
2688 break;
2689 }
2690 }
2691 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002692}
2693
2694static void cpu_notify_map_clients(void)
2695{
Fam Zheng38e047b2015-03-16 17:03:35 +08002696 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002697 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002698 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002699}
2700
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002701bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2702{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002703 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002704 hwaddr l, xlat;
2705
Paolo Bonzini41063e12015-03-18 14:21:43 +01002706 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002707 while (len > 0) {
2708 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002709 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2710 if (!memory_access_is_direct(mr, is_write)) {
2711 l = memory_access_size(mr, l, addr);
2712 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002713 return false;
2714 }
2715 }
2716
2717 len -= l;
2718 addr += l;
2719 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002720 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002721 return true;
2722}
2723
aliguori6d16c2f2009-01-22 16:59:11 +00002724/* Map a physical memory region into a host virtual address.
2725 * May map a subset of the requested range, given by and returned in *plen.
2726 * May return NULL if resources needed to perform the mapping are exhausted.
2727 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002728 * Use cpu_register_map_client() to know when retrying the map operation is
2729 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002730 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002731void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002732 hwaddr addr,
2733 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002734 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002735{
Avi Kivitya8170e52012-10-23 12:30:10 +02002736 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002737 hwaddr done = 0;
2738 hwaddr l, xlat, base;
2739 MemoryRegion *mr, *this_mr;
2740 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002741
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002742 if (len == 0) {
2743 return NULL;
2744 }
aliguori6d16c2f2009-01-22 16:59:11 +00002745
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002746 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002747 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002748 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002749
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002750 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002751 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002752 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002753 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002754 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002755 /* Avoid unbounded allocations */
2756 l = MIN(l, TARGET_PAGE_SIZE);
2757 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002758 bounce.addr = addr;
2759 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002760
2761 memory_region_ref(mr);
2762 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002763 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002764 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2765 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002766 }
aliguori6d16c2f2009-01-22 16:59:11 +00002767
Paolo Bonzini41063e12015-03-18 14:21:43 +01002768 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002769 *plen = l;
2770 return bounce.buffer;
2771 }
2772
2773 base = xlat;
2774 raddr = memory_region_get_ram_addr(mr);
2775
2776 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002777 len -= l;
2778 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002779 done += l;
2780 if (len == 0) {
2781 break;
2782 }
2783
2784 l = len;
2785 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2786 if (this_mr != mr || xlat != base + done) {
2787 break;
2788 }
aliguori6d16c2f2009-01-22 16:59:11 +00002789 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002790
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002791 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002792 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002793 *plen = done;
2794 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002795}
2796
Avi Kivityac1970f2012-10-03 16:22:53 +02002797/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002798 * Will also mark the memory as dirty if is_write == 1. access_len gives
2799 * the amount of memory that was actually read or written by the caller.
2800 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002801void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2802 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002803{
2804 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002805 MemoryRegion *mr;
2806 ram_addr_t addr1;
2807
2808 mr = qemu_ram_addr_from_host(buffer, &addr1);
2809 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002810 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002811 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002812 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002813 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002814 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002815 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002816 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002817 return;
2818 }
2819 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002820 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2821 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002822 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002823 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002824 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002825 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002826 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002827 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002828}
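
/* End-to-end sketch of the map/unmap contract (the buffer handling below is
 * a placeholder, not an existing caller): map as much of the range as
 * possible, use it directly, then unmap with the number of bytes actually
 * touched so dirty tracking and bounce-buffer writeback stay correct.
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, addr, &plen, true);
 *     if (!p) {
 *         return;                      (retry via cpu_register_map_client)
 *     }
 *     memset(p, 0, plen);              (plen may be less than size)
 *     address_space_unmap(as, p, plen, true, plen);
 */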
bellardd0ecd2a2006-04-23 17:14:48 +00002829
Avi Kivitya8170e52012-10-23 12:30:10 +02002830void *cpu_physical_memory_map(hwaddr addr,
2831 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002832 int is_write)
2833{
2834 return address_space_map(&address_space_memory, addr, plen, is_write);
2835}
2836
Avi Kivitya8170e52012-10-23 12:30:10 +02002837void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2838 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002839{
2840 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2841}
2842
bellard8df1cd02005-01-28 22:37:22 +00002843/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002844static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2845 MemTxAttrs attrs,
2846 MemTxResult *result,
2847 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002848{
bellard8df1cd02005-01-28 22:37:22 +00002849 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002850 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002851 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002852 hwaddr l = 4;
2853 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002854 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002855 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002856
Paolo Bonzini41063e12015-03-18 14:21:43 +01002857 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002858 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002859 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002860 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002861
bellard8df1cd02005-01-28 22:37:22 +00002862 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002863 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002864#if defined(TARGET_WORDS_BIGENDIAN)
2865 if (endian == DEVICE_LITTLE_ENDIAN) {
2866 val = bswap32(val);
2867 }
2868#else
2869 if (endian == DEVICE_BIG_ENDIAN) {
2870 val = bswap32(val);
2871 }
2872#endif
bellard8df1cd02005-01-28 22:37:22 +00002873 } else {
2874 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002875 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002876 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002877 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002878 switch (endian) {
2879 case DEVICE_LITTLE_ENDIAN:
2880 val = ldl_le_p(ptr);
2881 break;
2882 case DEVICE_BIG_ENDIAN:
2883 val = ldl_be_p(ptr);
2884 break;
2885 default:
2886 val = ldl_p(ptr);
2887 break;
2888 }
Peter Maydell50013112015-04-26 16:49:24 +01002889 r = MEMTX_OK;
2890 }
2891 if (result) {
2892 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002893 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002894 if (release_lock) {
2895 qemu_mutex_unlock_iothread();
2896 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002897 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002898 return val;
2899}
2900
Peter Maydell50013112015-04-26 16:49:24 +01002901uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2902 MemTxAttrs attrs, MemTxResult *result)
2903{
2904 return address_space_ldl_internal(as, addr, attrs, result,
2905 DEVICE_NATIVE_ENDIAN);
2906}
2907
2908uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2909 MemTxAttrs attrs, MemTxResult *result)
2910{
2911 return address_space_ldl_internal(as, addr, attrs, result,
2912 DEVICE_LITTLE_ENDIAN);
2913}
2914
2915uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2916 MemTxAttrs attrs, MemTxResult *result)
2917{
2918 return address_space_ldl_internal(as, addr, attrs, result,
2919 DEVICE_BIG_ENDIAN);
2920}
2921
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002922uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002923{
Peter Maydell50013112015-04-26 16:49:24 +01002924 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002925}
2926
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002927uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002928{
Peter Maydell50013112015-04-26 16:49:24 +01002929 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002930}
2931
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002932uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002933{
Peter Maydell50013112015-04-26 16:49:24 +01002934 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002935}
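
/* For instance (desc_addr is a placeholder), a device model can read a
 * 32-bit little-endian descriptor field independently of the target's byte
 * order; the _le/_be variants byte-swap as needed and return the value in
 * host order:
 *
 *     uint32_t next = ldl_le_phys(&address_space_memory, desc_addr + 8);
 */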
2936
bellard84b7b8e2005-11-28 21:19:04 +00002937/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002938static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2939 MemTxAttrs attrs,
2940 MemTxResult *result,
2941 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002942{
bellard84b7b8e2005-11-28 21:19:04 +00002943 uint8_t *ptr;
2944 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002945 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002946 hwaddr l = 8;
2947 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002948 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002949 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002950
Paolo Bonzini41063e12015-03-18 14:21:43 +01002951 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002952 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002953 false);
2954 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002955 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002956
bellard84b7b8e2005-11-28 21:19:04 +00002957 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002958 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002959#if defined(TARGET_WORDS_BIGENDIAN)
2960 if (endian == DEVICE_LITTLE_ENDIAN) {
2961 val = bswap64(val);
2962 }
2963#else
2964 if (endian == DEVICE_BIG_ENDIAN) {
2965 val = bswap64(val);
2966 }
2967#endif
bellard84b7b8e2005-11-28 21:19:04 +00002968 } else {
2969 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002970 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002971 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002972 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002973 switch (endian) {
2974 case DEVICE_LITTLE_ENDIAN:
2975 val = ldq_le_p(ptr);
2976 break;
2977 case DEVICE_BIG_ENDIAN:
2978 val = ldq_be_p(ptr);
2979 break;
2980 default:
2981 val = ldq_p(ptr);
2982 break;
2983 }
Peter Maydell50013112015-04-26 16:49:24 +01002984 r = MEMTX_OK;
2985 }
2986 if (result) {
2987 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00002988 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002989 if (release_lock) {
2990 qemu_mutex_unlock_iothread();
2991 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002992 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00002993 return val;
2994}
2995
Peter Maydell50013112015-04-26 16:49:24 +01002996uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2997 MemTxAttrs attrs, MemTxResult *result)
2998{
2999 return address_space_ldq_internal(as, addr, attrs, result,
3000 DEVICE_NATIVE_ENDIAN);
3001}
3002
3003uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3004 MemTxAttrs attrs, MemTxResult *result)
3005{
3006 return address_space_ldq_internal(as, addr, attrs, result,
3007 DEVICE_LITTLE_ENDIAN);
3008}
3009
3010uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3011 MemTxAttrs attrs, MemTxResult *result)
3012{
3013 return address_space_ldq_internal(as, addr, attrs, result,
3014 DEVICE_BIG_ENDIAN);
3015}
3016
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003017uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003018{
Peter Maydell50013112015-04-26 16:49:24 +01003019 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003020}
3021
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003022uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003023{
Peter Maydell50013112015-04-26 16:49:24 +01003024 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003025}
3026
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003027uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003028{
Peter Maydell50013112015-04-26 16:49:24 +01003029 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003030}
3031
bellardaab33092005-10-30 20:48:42 +00003032/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003033uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3034 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003035{
3036 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003037 MemTxResult r;
3038
3039 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3040 if (result) {
3041 *result = r;
3042 }
bellardaab33092005-10-30 20:48:42 +00003043 return val;
3044}
3045
Peter Maydell50013112015-04-26 16:49:24 +01003046uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3047{
3048 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3049}
3050
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003051/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003052static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3053 hwaddr addr,
3054 MemTxAttrs attrs,
3055 MemTxResult *result,
3056 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003057{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003058 uint8_t *ptr;
3059 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003060 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003061 hwaddr l = 2;
3062 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003063 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003064 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003065
Paolo Bonzini41063e12015-03-18 14:21:43 +01003066 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003067 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003068 false);
3069 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003070 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003071
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003072 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003073 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003074#if defined(TARGET_WORDS_BIGENDIAN)
3075 if (endian == DEVICE_LITTLE_ENDIAN) {
3076 val = bswap16(val);
3077 }
3078#else
3079 if (endian == DEVICE_BIG_ENDIAN) {
3080 val = bswap16(val);
3081 }
3082#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003083 } else {
3084 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003085 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003086 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003087 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003088 switch (endian) {
3089 case DEVICE_LITTLE_ENDIAN:
3090 val = lduw_le_p(ptr);
3091 break;
3092 case DEVICE_BIG_ENDIAN:
3093 val = lduw_be_p(ptr);
3094 break;
3095 default:
3096 val = lduw_p(ptr);
3097 break;
3098 }
Peter Maydell50013112015-04-26 16:49:24 +01003099 r = MEMTX_OK;
3100 }
3101 if (result) {
3102 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003103 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003104 if (release_lock) {
3105 qemu_mutex_unlock_iothread();
3106 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003107 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003108 return val;
bellardaab33092005-10-30 20:48:42 +00003109}
3110
Peter Maydell50013112015-04-26 16:49:24 +01003111uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3112 MemTxAttrs attrs, MemTxResult *result)
3113{
3114 return address_space_lduw_internal(as, addr, attrs, result,
3115 DEVICE_NATIVE_ENDIAN);
3116}
3117
3118uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3119 MemTxAttrs attrs, MemTxResult *result)
3120{
3121 return address_space_lduw_internal(as, addr, attrs, result,
3122 DEVICE_LITTLE_ENDIAN);
3123}
3124
3125uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3126 MemTxAttrs attrs, MemTxResult *result)
3127{
3128 return address_space_lduw_internal(as, addr, attrs, result,
3129 DEVICE_BIG_ENDIAN);
3130}
3131
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003132uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003133{
Peter Maydell50013112015-04-26 16:49:24 +01003134 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003135}
3136
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003137uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003138{
Peter Maydell50013112015-04-26 16:49:24 +01003139 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003140}
3141
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003142uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003143{
Peter Maydell50013112015-04-26 16:49:24 +01003144 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003145}
3146
bellard8df1cd02005-01-28 22:37:22 +00003147/* warning: addr must be aligned.  The RAM page is not marked as dirty
3148 and the code inside is not invalidated.  This is useful when the dirty
3149 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003150void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3151 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003152{
bellard8df1cd02005-01-28 22:37:22 +00003153 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003154 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003155 hwaddr l = 4;
3156 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003157 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003158 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003159 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003160
Paolo Bonzini41063e12015-03-18 14:21:43 +01003161 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003162 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003163 true);
3164 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003165 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003166
Peter Maydell50013112015-04-26 16:49:24 +01003167 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003168 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003169 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003170 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003171 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003172
Paolo Bonzini845b6212015-03-23 11:45:53 +01003173 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3174 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003175 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003176 r = MEMTX_OK;
3177 }
3178 if (result) {
3179 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003180 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003181 if (release_lock) {
3182 qemu_mutex_unlock_iothread();
3183 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003184 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003185}
3186
Peter Maydell50013112015-04-26 16:49:24 +01003187void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3188{
3189 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3190}
3191
bellard8df1cd02005-01-28 22:37:22 +00003192/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003193static inline void address_space_stl_internal(AddressSpace *as,
3194 hwaddr addr, uint32_t val,
3195 MemTxAttrs attrs,
3196 MemTxResult *result,
3197 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003198{
bellard8df1cd02005-01-28 22:37:22 +00003199 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003200 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003201 hwaddr l = 4;
3202 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003203 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003204 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003205
Paolo Bonzini41063e12015-03-18 14:21:43 +01003206 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003207 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003208 true);
3209 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003210 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003211
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003212#if defined(TARGET_WORDS_BIGENDIAN)
3213 if (endian == DEVICE_LITTLE_ENDIAN) {
3214 val = bswap32(val);
3215 }
3216#else
3217 if (endian == DEVICE_BIG_ENDIAN) {
3218 val = bswap32(val);
3219 }
3220#endif
Peter Maydell50013112015-04-26 16:49:24 +01003221 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003222 } else {
bellard8df1cd02005-01-28 22:37:22 +00003223 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003224 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003225 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003226 switch (endian) {
3227 case DEVICE_LITTLE_ENDIAN:
3228 stl_le_p(ptr, val);
3229 break;
3230 case DEVICE_BIG_ENDIAN:
3231 stl_be_p(ptr, val);
3232 break;
3233 default:
3234 stl_p(ptr, val);
3235 break;
3236 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003237 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003238 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003239 }
Peter Maydell50013112015-04-26 16:49:24 +01003240 if (result) {
3241 *result = r;
3242 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003243 if (release_lock) {
3244 qemu_mutex_unlock_iothread();
3245 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003246 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003247}
3248
3249void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3250 MemTxAttrs attrs, MemTxResult *result)
3251{
3252 address_space_stl_internal(as, addr, val, attrs, result,
3253 DEVICE_NATIVE_ENDIAN);
3254}
3255
3256void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3257 MemTxAttrs attrs, MemTxResult *result)
3258{
3259 address_space_stl_internal(as, addr, val, attrs, result,
3260 DEVICE_LITTLE_ENDIAN);
3261}
3262
3263void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3264 MemTxAttrs attrs, MemTxResult *result)
3265{
3266 address_space_stl_internal(as, addr, val, attrs, result,
3267 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003268}
3269
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003270void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003271{
Peter Maydell50013112015-04-26 16:49:24 +01003272 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003273}
3274
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003275void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003276{
Peter Maydell50013112015-04-26 16:49:24 +01003277 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003278}
3279
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003280void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003281{
Peter Maydell50013112015-04-26 16:49:24 +01003282 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003283}
3284
bellardaab33092005-10-30 20:48:42 +00003285/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003286void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3287 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003288{
3289 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003290 MemTxResult r;
3291
3292 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3293 if (result) {
3294 *result = r;
3295 }
3296}
3297
3298void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3299{
3300 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003301}
3302
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003303/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003304static inline void address_space_stw_internal(AddressSpace *as,
3305 hwaddr addr, uint32_t val,
3306 MemTxAttrs attrs,
3307 MemTxResult *result,
3308 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003309{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003310 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003311 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003312 hwaddr l = 2;
3313 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003314 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003315 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003316
Paolo Bonzini41063e12015-03-18 14:21:43 +01003317 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003318 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003319 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003320 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003321
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003322#if defined(TARGET_WORDS_BIGENDIAN)
3323 if (endian == DEVICE_LITTLE_ENDIAN) {
3324 val = bswap16(val);
3325 }
3326#else
3327 if (endian == DEVICE_BIG_ENDIAN) {
3328 val = bswap16(val);
3329 }
3330#endif
Peter Maydell50013112015-04-26 16:49:24 +01003331 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003332 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003333 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003334 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003335 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003336 switch (endian) {
3337 case DEVICE_LITTLE_ENDIAN:
3338 stw_le_p(ptr, val);
3339 break;
3340 case DEVICE_BIG_ENDIAN:
3341 stw_be_p(ptr, val);
3342 break;
3343 default:
3344 stw_p(ptr, val);
3345 break;
3346 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003347 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003348 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003349 }
Peter Maydell50013112015-04-26 16:49:24 +01003350 if (result) {
3351 *result = r;
3352 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003353 if (release_lock) {
3354 qemu_mutex_unlock_iothread();
3355 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003356 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003357}
3358
3359void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3360 MemTxAttrs attrs, MemTxResult *result)
3361{
3362 address_space_stw_internal(as, addr, val, attrs, result,
3363 DEVICE_NATIVE_ENDIAN);
3364}
3365
3366void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3367 MemTxAttrs attrs, MemTxResult *result)
3368{
3369 address_space_stw_internal(as, addr, val, attrs, result,
3370 DEVICE_LITTLE_ENDIAN);
3371}
3372
3373void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3374 MemTxAttrs attrs, MemTxResult *result)
3375{
3376 address_space_stw_internal(as, addr, val, attrs, result,
3377 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003378}
3379
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003380void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003381{
Peter Maydell50013112015-04-26 16:49:24 +01003382 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003383}
3384
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003385void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003386{
Peter Maydell50013112015-04-26 16:49:24 +01003387 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003388}
3389
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003390void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003391{
Peter Maydell50013112015-04-26 16:49:24 +01003392 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003393}
3394
bellardaab33092005-10-30 20:48:42 +00003395/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003396void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3397 MemTxAttrs attrs, MemTxResult *result)
3398{
3399 MemTxResult r;
3400 val = tswap64(val);
3401 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3402 if (result) {
3403 *result = r;
3404 }
3405}
3406
3407void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3408 MemTxAttrs attrs, MemTxResult *result)
3409{
3410 MemTxResult r;
3411 val = cpu_to_le64(val);
3412 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3413 if (result) {
3414 *result = r;
3415 }
3416}

3417void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3418 MemTxAttrs attrs, MemTxResult *result)
3419{
3420 MemTxResult r;
3421 val = cpu_to_be64(val);
3422 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3423 if (result) {
3424 *result = r;
3425 }
3426}
3427
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003428void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003429{
Peter Maydell50013112015-04-26 16:49:24 +01003430 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003431}
3432
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003433void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003434{
Peter Maydell50013112015-04-26 16:49:24 +01003435 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003436}
3437
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003438void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003439{
Peter Maydell50013112015-04-26 16:49:24 +01003440 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003441}
3442
aliguori5e2972f2009-03-28 17:51:36 +00003443/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003444int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003445 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003446{
3447 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003448 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003449 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003450
3451 while (len > 0) {
3452 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003453 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003454 /* if no physical page mapped, return an error */
3455 if (phys_addr == -1)
3456 return -1;
3457 l = (page + TARGET_PAGE_SIZE) - addr;
3458 if (l > len)
3459 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003460 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003461 if (is_write) {
3462 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3463 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003464 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3465 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003466 }
bellard13eb76e2004-01-24 15:23:36 +00003467 len -= l;
3468 buf += l;
3469 addr += l;
3470 }
3471 return 0;
3472}
Paul Brooka68fe892010-03-01 00:08:59 +00003473#endif
bellard13eb76e2004-01-24 15:23:36 +00003474
Blue Swirl8e4a4242013-01-06 18:30:17 +00003475/*
3476 * A helper function for the _utterly broken_ virtio device model to find out if
3477 * it's running on a big endian machine. Don't do this at home kids!
3478 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003479bool target_words_bigendian(void);
3480bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003481{
3482#if defined(TARGET_WORDS_BIGENDIAN)
3483 return true;
3484#else
3485 return false;
3486#endif
3487}
3488
Wen Congyang76f35532012-05-07 12:04:18 +08003489#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003490bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003491{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003492 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003493 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003494 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003495
Paolo Bonzini41063e12015-03-18 14:21:43 +01003496 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003497 mr = address_space_translate(&address_space_memory,
3498 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003499
Paolo Bonzini41063e12015-03-18 14:21:43 +01003500 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3501 rcu_read_unlock();
3502 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003503}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003504
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003505int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003506{
3507 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003508 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003509
Mike Day0dc3f442013-09-05 14:41:35 -04003510 rcu_read_lock();
3511 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003512 ret = func(block->idstr, block->host, block->offset,
3513 block->used_length, opaque);
3514 if (ret) {
3515 break;
3516 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003517 }
Mike Day0dc3f442013-09-05 14:41:35 -04003518 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003519 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003520}
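
/* A minimal iterator sketch (dump_block is hypothetical): visit every RAM
 * block; returning non-zero from the callback stops the walk early.
 *
 *     static int dump_block(const char *idstr, void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         fprintf(stderr, "%s: host %p, %" PRIu64 " bytes\n",
 *                 idstr, host, (uint64_t)length);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */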
Peter Maydellec3f8c92013-06-27 20:53:38 +01003521#endif