/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels to skip to reach the next node (in units of
     * L2_SIZE); 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

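/* A note on the sentinel below: ((uint32_t)~0) >> 6 evaluates to 0x03ffffff,
 * the largest value the 26-bit ptr field can hold, reserved to mean
 * "no node".
 */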
#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
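/* Worked example (a sketch assuming a 4 KiB target page, i.e.
 * TARGET_PAGE_BITS == 12): (64 - 12 - 1) / 9 + 1 == 6 levels of
 * 512-entry tables.
 */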

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
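/* Note: sub_section[] is indexed by the byte offset within the page
 * (SUBPAGE_IDX above), so a subpage keeps one 16-bit section index per
 * byte of the page it covers.
 */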

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf,
                        P_L2_LEVELS - 1);
}
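/* Usage sketch: callers map a page-aligned range onto a section index, e.g.
 *
 *     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages,
 *                   section_index);
 *
 * as register_multipage() does below.
 */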

/* Compact a non-leaf page entry: detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes,
                              unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

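/* Walk the radix tree from the root, consuming `skip` levels per node, and
 * return the section covering addr, or the unassigned section if the walk
 * falls off the tree or the leaf does not actually cover addr.
 */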
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes,
                                           MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr,
                                 hwaddr *xlat, hwaddr *plen,
                                 bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
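/* A minimal usage sketch (mirroring the pattern of callers such as
 * address_space_rw(); error handling and dirty tracking omitted):
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &len, true);
 *     if (memory_access_is_direct(mr, true)) {
 *         void *ptr = qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat);
 *         memcpy(ptr, buf, len);
 *     }
 *     rcu_read_unlock();
 */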

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
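/* Example: a watchpoint covering [0x1000, 0x1003] (vaddr 0x1000, len 4)
 * matches an access covering [0x1002, 0x1005] (addr 0x1002, len 4): neither
 * range starts past the other's end, so the ranges overlap.
 */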

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

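/* The iotlb value built below encodes either a ram_addr_t (for RAM, with one
 * of the low PHYS_SECTION_* indices OR'ed into the page-offset bits) or a
 * section index plus offset (for MMIO); the TLB slow path decodes it again.
 */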
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
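/* A hypothetical override (sketch; my_ram_alloc is an illustration, not a
 * real QEMU function):
 *
 *     static void *my_ram_alloc(size_t size, uint64_t *align)
 *     {
 *         return qemu_anon_ram_alloc(size, align);
 *     }
 *     ...
 *     phys_mem_set_alloc(my_ram_alloc);
 */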

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d,
                             MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes,
                                                   d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
1134
Sheng Yang62a27442010-01-26 19:21:16 +08001135void qemu_flush_coalesced_mmio_buffer(void)
1136{
1137 if (kvm_enabled()) {
1138 kvm_flush_coalesced_mmio_buffer();
    }
1139}
1140
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001141void qemu_mutex_lock_ramlist(void)
1142{
1143 qemu_mutex_lock(&ram_list.mutex);
1144}
1145
1146void qemu_mutex_unlock_ramlist(void)
1147{
1148 qemu_mutex_unlock(&ram_list.mutex);
1149}
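/* Usage sketch (illustrative only): writers that mutate the block list take
 * the mutex, while pure readers can rely on RCU instead:
 *
 *     qemu_mutex_lock_ramlist();
 *     QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
 *     qemu_mutex_unlock_ramlist();
 */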
1150
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001151#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001152
1153#include <sys/vfs.h>
1154
1155#define HUGETLBFS_MAGIC 0x958458f6
1156
Hu Taofc7a5802014-09-09 13:28:01 +08001157static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001158{
1159 struct statfs fs;
1160 int ret;
1161
1162 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001163 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001164 } while (ret != 0 && errno == EINTR);
1165
1166 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001167 error_setg_errno(errp, errno, "failed to get page size of file %s",
1168 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001169 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001170 }
1171
1172 if (fs.f_type != HUGETLBFS_MAGIC) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001173 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001174
1175 return fs.f_bsize;
1176}
1177
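/* Back the block with a hugetlbfs file: create an unlinked temporary file
 * under @path, size it with ftruncate(), and mmap() it MAP_SHARED or
 * MAP_PRIVATE according to RAM_SHARED.
 */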
Alex Williamson04b16652010-07-02 11:13:17 -06001178static void *file_ram_alloc(RAMBlock *block,
1179 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001180 const char *path,
1181 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001182{
1183 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001184 char *sanitized_name;
1185 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001186 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001187 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001188 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001189 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001190
Hu Taofc7a5802014-09-09 13:28:01 +08001191 hpagesize = gethugepagesize(path, &local_err);
1192 if (local_err) {
1193 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001194 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001195 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001196 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001197
1198 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001199 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1200 "or larger than huge page size 0x%" PRIx64,
1201 memory, hpagesize);
1202 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001203 }
1204
1205 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001206 error_setg(errp,
1207 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001208 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001209 }
1210
Peter Feiner8ca761f2013-03-04 13:54:25 -05001211 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001212 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001213 for (c = sanitized_name; *c != '\0'; c++) {
1214 if (*c == '/') {
1215 *c = '_';
        }
1216 }
1217
1218 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1219 sanitized_name);
1220 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001221
1222 fd = mkstemp(filename);
1223 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001224 error_setg_errno(errp, errno,
1225 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001226 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001227 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001228 }
1229 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001230 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231
Chen Hanxiao9284f312015-07-24 11:12:03 +08001232 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001233
1234 /*
1235 * ftruncate is not supported by hugetlbfs in older
1236 * hosts, so don't bother bailing out on errors.
1237 * If anything goes wrong with it under other filesystems,
1238 * mmap will fail.
1239 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001240 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001241 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001242 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001243
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001244 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1245 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1246 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001247 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001248 error_setg_errno(errp, errno,
1249 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001250 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001251 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001252 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001253
1254 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001255 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001256 }
1257
Alex Williamson04b16652010-07-02 11:13:17 -06001258 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001259 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001260
1261error:
1262 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001263 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001264 exit(1);
1265 }
1266 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267}
1268#endif
1269
Mike Day0dc3f442013-09-05 14:41:35 -04001270/* Called with the ramlist lock held. */
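/* Best-fit search: for every block, measure the gap between its end and the
 * closest block that starts after it, and hand out the smallest gap that
 * still fits @size.
 */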
Alex Williamsond17b5282010-06-25 11:08:38 -06001271static ram_addr_t find_ram_offset(ram_addr_t size)
1272{
Alex Williamson04b16652010-07-02 11:13:17 -06001273 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001274 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001275
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001276 assert(size != 0); /* it would hand out the same offset multiple times */
1277
Mike Day0dc3f442013-09-05 14:41:35 -04001278 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001279 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001280 }
Alex Williamson04b16652010-07-02 11:13:17 -06001281
Mike Day0dc3f442013-09-05 14:41:35 -04001282 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001283 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001284
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001285 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001286
Mike Day0dc3f442013-09-05 14:41:35 -04001287 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001288 if (next_block->offset >= end) {
1289 next = MIN(next, next_block->offset);
1290 }
1291 }
1292 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001293 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001294 mingap = next - end;
1295 }
1296 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001297
1298 if (offset == RAM_ADDR_MAX) {
1299 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1300 (uint64_t)size);
1301 abort();
1302 }
1303
Alex Williamson04b16652010-07-02 11:13:17 -06001304 return offset;
1305}
1306
Juan Quintela652d7ec2012-07-20 10:37:54 +02001307ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001308{
Alex Williamsond17b5282010-06-25 11:08:38 -06001309 RAMBlock *block;
1310 ram_addr_t last = 0;
1311
Mike Day0dc3f442013-09-05 14:41:35 -04001312 rcu_read_lock();
1313 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001314 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001315 }
Mike Day0dc3f442013-09-05 14:41:35 -04001316 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001317 return last;
1318}
1319
Jason Baronddb97f12012-08-02 15:44:16 -04001320static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1321{
1322 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001323
1324 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001325 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001326 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1327 if (ret) {
1328 perror("qemu_madvise");
1329 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1330 "but dump_guest_core=off specified\n");
1331 }
1332 }
1333}
1334
Mike Day0dc3f442013-09-05 14:41:35 -04001335/* Called within an RCU critical section, or while the ramlist lock
1336 * is held.
1337 */
Hu Tao20cfe882014-04-02 15:13:26 +08001338static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001339{
Hu Tao20cfe882014-04-02 15:13:26 +08001340 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001341
Mike Day0dc3f442013-09-05 14:41:35 -04001342 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001343 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001344 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001345 }
1346 }
Hu Tao20cfe882014-04-02 15:13:26 +08001347
1348 return NULL;
1349}
1350
Mike Dayae3a7042013-09-05 14:41:35 -04001351/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001352void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1353{
Mike Dayae3a7042013-09-05 14:41:35 -04001354 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001355
Mike Day0dc3f442013-09-05 14:41:35 -04001356 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001357 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001358 assert(new_block);
1359 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001360
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001361 if (dev) {
1362 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001363 if (id) {
1364 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001365 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001366 }
1367 }
1368 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1369
Mike Day0dc3f442013-09-05 14:41:35 -04001370 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001371 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001372 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1373 new_block->idstr);
1374 abort();
1375 }
1376 }
Mike Day0dc3f442013-09-05 14:41:35 -04001377 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001378}
1379
Mike Dayae3a7042013-09-05 14:41:35 -04001380/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001381void qemu_ram_unset_idstr(ram_addr_t addr)
1382{
Mike Dayae3a7042013-09-05 14:41:35 -04001383 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001384
Mike Dayae3a7042013-09-05 14:41:35 -04001385 /* FIXME: arch_init.c assumes that this is not called throughout
1386 * migration. Ignore the problem since hot-unplug during migration
1387 * does not work anyway.
1388 */
1389
Mike Day0dc3f442013-09-05 14:41:35 -04001390 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001391 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001392 if (block) {
1393 memset(block->idstr, 0, sizeof(block->idstr));
1394 }
Mike Day0dc3f442013-09-05 14:41:35 -04001395 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001396}
1397
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001398static int memory_try_enable_merging(void *addr, size_t len)
1399{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001400 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001401 /* disabled by the user */
1402 return 0;
1403 }
1404
1405 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1406}
1407
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001408/* Only legal before the guest might have detected the memory size: e.g. on
1409 * incoming migration, or right after reset.
1410 *
1411 * As the memory core doesn't know how memory is accessed, it is up to the
1412 * resize callback to update device state and/or add assertions to detect
1413 * misuse, if necessary.
1414 */
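/* Usage sketch (illustrative only; callers pick their own error handling):
 *
 *     Error *err = NULL;
 *     qemu_ram_resize(base, newsize, &err);
 *     if (err) {
 *         error_report_err(err);
 *     }
 */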
1415int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1416{
1417 RAMBlock *block = find_ram_block(base);
1418
1419 assert(block);
1420
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001421 newsize = TARGET_PAGE_ALIGN(newsize);
1422
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001423 if (block->used_length == newsize) {
1424 return 0;
1425 }
1426
1427 if (!(block->flags & RAM_RESIZEABLE)) {
1428 error_setg_errno(errp, EINVAL,
1429 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1430 " in != 0x" RAM_ADDR_FMT, block->idstr,
1431 newsize, block->used_length);
1432 return -EINVAL;
1433 }
1434
1435 if (block->max_length < newsize) {
1436 error_setg_errno(errp, EINVAL,
1437 "Length too large: %s: 0x" RAM_ADDR_FMT
1438 " > 0x" RAM_ADDR_FMT, block->idstr,
1439 newsize, block->max_length);
1440 return -EINVAL;
1441 }
1442
1443 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1444 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001445 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1446 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001447 memory_region_set_size(block->mr, newsize);
1448 if (block->resized) {
1449 block->resized(block->idstr, newsize, block->host);
1450 }
1451 return 0;
1452}
1453
Hu Taoef701d72014-09-09 13:27:54 +08001454static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001455{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001456 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001457 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001458 ram_addr_t old_ram_size, new_ram_size;
1459
1460 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001461
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001462 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001463 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001464
1465 if (!new_block->host) {
1466 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001467 xen_ram_alloc(new_block->offset, new_block->max_length,
1468 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001469 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001470 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001471 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001472 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001473 error_setg_errno(errp, errno,
1474 "cannot set up guest memory '%s'",
1475 memory_region_name(new_block->mr));
1476 qemu_mutex_unlock_ramlist();
1477 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001478 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001479 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001480 }
1481 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001482
Li Zhijiandd631692015-07-02 20:18:06 +08001483 new_ram_size = MAX(old_ram_size,
1484 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1485 if (new_ram_size > old_ram_size) {
1486 migration_bitmap_extend(old_ram_size, new_ram_size);
1487 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001488 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1489 * QLIST (which has an RCU-friendly variant) does not have insertion at
1490 * tail, so save the last element in last_block.
1491 */
Mike Day0dc3f442013-09-05 14:41:35 -04001492 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001493 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001494 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001495 break;
1496 }
1497 }
1498 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001499 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001500 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001501 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001502 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001503 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001504 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001505 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001506
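/* Readers sample ram_list.version before and after walking the list; the
 * barrier below makes the new block visible no later than the version bump,
 * which is what makes that re-check sufficient.
 */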
Mike Day0dc3f442013-09-05 14:41:35 -04001507 /* Write list before version */
1508 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001509 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001510 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001511
Juan Quintela2152f5c2013-10-08 13:52:02 +02001512 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1513
1514 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001515 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001516
1517 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001518 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1519 ram_list.dirty_memory[i] =
1520 bitmap_zero_extend(ram_list.dirty_memory[i],
1521 old_ram_size, new_ram_size);
1522 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001523 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001524 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001525 new_block->used_length,
1526 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001527
Paolo Bonzinia904c912015-01-21 16:18:35 +01001528 if (new_block->host) {
1529 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1530 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1531 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1532 if (kvm_enabled()) {
1533 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1534 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001535 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001536
1537 return new_block->offset;
1538}
1539
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001540#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001541ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001542 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001543 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001544{
1545 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001546 ram_addr_t addr;
1547 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001548
1549 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001550 error_setg(errp, "-mem-path not supported with Xen");
1551 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001552 }
1553
1554 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1555 /*
1556 * file_ram_alloc() needs to allocate just like
1557 * phys_mem_alloc, but we haven't bothered to provide
1558 * a hook there.
1559 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001560 error_setg(errp,
1561 "-mem-path not supported with this accelerator");
1562 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001563 }
1564
1565 size = TARGET_PAGE_ALIGN(size);
1566 new_block = g_malloc0(sizeof(*new_block));
1567 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001568 new_block->used_length = size;
1569 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001570 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001571 new_block->host = file_ram_alloc(new_block, size,
1572 mem_path, errp);
1573 if (!new_block->host) {
1574 g_free(new_block);
1575 return -1;
1576 }
1577
Hu Taoef701d72014-09-09 13:27:54 +08001578 addr = ram_block_add(new_block, &local_err);
1579 if (local_err) {
1580 g_free(new_block);
1581 error_propagate(errp, local_err);
1582 return -1;
1583 }
1584 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001585}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001586#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001587
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001588static
1589ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1590 void (*resized)(const char*,
1591 uint64_t length,
1592 void *host),
1593 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001594 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001595{
1596 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001597 ram_addr_t addr;
1598 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001599
1600 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001601 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001602 new_block = g_malloc0(sizeof(*new_block));
1603 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001604 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001605 new_block->used_length = size;
1606 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001607 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001608 new_block->fd = -1;
1609 new_block->host = host;
1610 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001611 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001612 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001613 if (resizeable) {
1614 new_block->flags |= RAM_RESIZEABLE;
1615 }
Hu Taoef701d72014-09-09 13:27:54 +08001616 addr = ram_block_add(new_block, &local_err);
1617 if (local_err) {
1618 g_free(new_block);
1619 error_propagate(errp, local_err);
1620 return -1;
1621 }
1622 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001623}
1624
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001625ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1626 MemoryRegion *mr, Error **errp)
1627{
1628 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1629}
1630
Hu Taoef701d72014-09-09 13:27:54 +08001631ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001632{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001633 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1634}
1635
1636ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1637 void (*resized)(const char*,
1638 uint64_t length,
1639 void *host),
1640 MemoryRegion *mr, Error **errp)
1641{
1642 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001643}
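/* Note: memory_region_init_ram() and friends funnel into qemu_ram_alloc();
 * a block that may grow (e.g. across migration) is created with
 * qemu_ram_alloc_resizeable() and a resized callback so the owning device
 * can react to qemu_ram_resize().
 */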
bellarde9a1ab12007-02-08 23:08:38 +00001644
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001645void qemu_ram_free_from_ptr(ram_addr_t addr)
1646{
1647 RAMBlock *block;
1648
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001649 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001650 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001651 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001652 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001653 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001654 /* Write list before version */
1655 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001656 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001657 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001658 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001659 }
1660 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001661 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001662}
1663
Paolo Bonzini43771532013-09-09 17:58:40 +02001664static void reclaim_ramblock(RAMBlock *block)
1665{
1666 if (block->flags & RAM_PREALLOC) {
1667 ;
1668 } else if (xen_enabled()) {
1669 xen_invalidate_map_cache_entry(block->host);
1670#ifndef _WIN32
1671 } else if (block->fd >= 0) {
1672 munmap(block->host, block->max_length);
1673 close(block->fd);
1674#endif
1675 } else {
1676 qemu_anon_ram_free(block->host, block->max_length);
1677 }
1678 g_free(block);
1679}
1680
Anthony Liguoric227f092009-10-01 16:12:16 -05001681void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001682{
Alex Williamson04b16652010-07-02 11:13:17 -06001683 RAMBlock *block;
1684
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001685 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001686 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001687 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001688 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001689 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001690 /* Write list before version */
1691 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001692 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001693 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001694 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001695 }
1696 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001697 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001698}
1699
Huang Yingcd19cfa2011-03-02 08:56:19 +01001700#ifndef _WIN32
1701void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1702{
1703 RAMBlock *block;
1704 ram_addr_t offset;
1705 int flags;
1706 void *area, *vaddr;
1707
Mike Day0dc3f442013-09-05 14:41:35 -04001708 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001709 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001710 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001711 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001712 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001713 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001714 } else if (xen_enabled()) {
1715 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001716 } else {
1717 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001718 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001719 flags |= (block->flags & RAM_SHARED ?
1720 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001721 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1722 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001723 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001724 /*
1725 * Remap needs to match alloc. Accelerators that
1726 * set phys_mem_alloc never remap. If they did,
1727 * we'd need a remap hook here.
1728 */
1729 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1730
Huang Yingcd19cfa2011-03-02 08:56:19 +01001731 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1732 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1733 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001734 }
1735 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001736 fprintf(stderr, "Could not remap addr: "
1737 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001738 length, addr);
1739 exit(1);
1740 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001741 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001742 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001743 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001744 }
1745 }
1746}
1747#endif /* !_WIN32 */
1748
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001749int qemu_get_ram_fd(ram_addr_t addr)
1750{
Mike Dayae3a7042013-09-05 14:41:35 -04001751 RAMBlock *block;
1752 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001753
Mike Day0dc3f442013-09-05 14:41:35 -04001754 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001755 block = qemu_get_ram_block(addr);
1756 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001757 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001758 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001759}
1760
Damjan Marion3fd74b82014-06-26 23:01:32 +02001761void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1762{
Mike Dayae3a7042013-09-05 14:41:35 -04001763 RAMBlock *block;
1764 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001765
Mike Day0dc3f442013-09-05 14:41:35 -04001766 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001767 block = qemu_get_ram_block(addr);
1768 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001769 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001770 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001771}
1772
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001773/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001774 * This should not be used for general purpose DMA. Use address_space_map
1775 * or address_space_rw instead. For local memory (e.g. video ram) that the
1776 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001777 *
1778 * By the time this function returns, the returned pointer is not protected
1779 * by RCU anymore. If the caller is not within an RCU critical section and
1780 * does not hold the iothread lock, it must have other means of protecting the
1781 * pointer, such as a reference to the region that includes the incoming
1782 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001783 */
1784void *qemu_get_ram_ptr(ram_addr_t addr)
1785{
Mike Dayae3a7042013-09-05 14:41:35 -04001786 RAMBlock *block;
1787 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001788
Mike Day0dc3f442013-09-05 14:41:35 -04001789 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001790 block = qemu_get_ram_block(addr);
1791
1792 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001793 /* We need to check if the requested address is in the RAM
1794 * because we don't want to map the entire memory in QEMU.
1795 * In that case just map until the end of the page.
1796 */
1797 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001798 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001799 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001800 }
Mike Dayae3a7042013-09-05 14:41:35 -04001801
1802 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001803 }
Mike Dayae3a7042013-09-05 14:41:35 -04001804 ptr = ramblock_ptr(block, addr - block->offset);
1805
Mike Day0dc3f442013-09-05 14:41:35 -04001806unlock:
1807 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001808 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001809}
1810
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001811/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001812 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001813 *
1814 * By the time this function returns, the returned pointer is not protected
1815 * by RCU anymore. If the caller is not within an RCU critical section and
1816 * does not hold the iothread lock, it must have other means of protecting the
1817 * pointer, such as a reference to the region that includes the incoming
1818 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001819 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001820static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001821{
Mike Dayae3a7042013-09-05 14:41:35 -04001822 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001823 if (*size == 0) {
1824 return NULL;
1825 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001826 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001827 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001828 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001829 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001830 rcu_read_lock();
1831 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001832 if (addr - block->offset < block->max_length) {
1833 if (addr - block->offset + *size > block->max_length) {
1834 *size = block->max_length - addr + block->offset;
                }
Mike Dayae3a7042013-09-05 14:41:35 -04001835 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001836 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001837 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001838 }
1839 }
1840
1841 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1842 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001843 }
1844}
1845
Paolo Bonzini7443b432013-06-03 12:44:02 +02001846/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001847 * (typically a TLB entry) back to a ram offset.
1848 *
1849 * By the time this function returns, the returned pointer is not protected
1850 * by RCU anymore. If the caller is not within an RCU critical section and
1851 * does not hold the iothread lock, it must have other means of protecting the
1852 * pointer, such as a reference to the region that includes the incoming
1853 * ram_addr_t.
1854 */
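/* ram_list.mru_block is a one-entry cache: repeated translations that hit
 * the same block skip the full list walk.
 */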
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001855MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001856{
pbrook94a6b542009-04-11 17:15:54 +00001857 RAMBlock *block;
1858 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001859 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001860
Jan Kiszka868bb332011-06-21 22:59:09 +02001861 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001862 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001863 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001864 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001865 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001866 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001867 }
1868
Mike Day0dc3f442013-09-05 14:41:35 -04001869 rcu_read_lock();
1870 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001871 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001872 goto found;
1873 }
1874
Mike Day0dc3f442013-09-05 14:41:35 -04001875 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001876 /* This case appears when the block is not mapped. */
1877 if (block->host == NULL) {
1878 continue;
1879 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001880 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001881 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001882 }
pbrook94a6b542009-04-11 17:15:54 +00001883 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001884
Mike Day0dc3f442013-09-05 14:41:35 -04001885 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001886 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001887
1888found:
1889 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001890 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001891 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001892 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001893}
Alex Williamsonf471a172010-06-11 11:11:42 -06001894
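/* Write handler installed for RAM pages whose code-dirty bit is clear: it
 * invalidates any TBs on the page, performs the store, marks the page
 * dirty, and retires itself (tlb_set_dirty) once the page is dirty for
 * every client.
 */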
Avi Kivitya8170e52012-10-23 12:30:10 +02001895static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001896 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001897{
Juan Quintela52159192013-10-08 12:44:04 +02001898 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001899 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001900 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001901 switch (size) {
1902 case 1:
1903 stb_p(qemu_get_ram_ptr(ram_addr), val);
1904 break;
1905 case 2:
1906 stw_p(qemu_get_ram_ptr(ram_addr), val);
1907 break;
1908 case 4:
1909 stl_p(qemu_get_ram_ptr(ram_addr), val);
1910 break;
1911 default:
1912 abort();
1913 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001914 /* Set both VGA and migration bits for simplicity and to remove
1915 * the notdirty callback faster.
1916 */
1917 cpu_physical_memory_set_dirty_range(ram_addr, size,
1918 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001919 /* we remove the notdirty callback only if the code has been
1920 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001921 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001922 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001923 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001924 }
bellard1ccde1c2004-02-06 19:46:14 +00001925}
1926
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001927static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1928 unsigned size, bool is_write)
1929{
1930 return is_write;
1931}
1932
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001933static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001934 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001935 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001936 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001937};
1938
pbrook0f459d12008-06-09 00:20:13 +00001939/* Generate a debug exception if a watchpoint has been hit. */
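/* On a hit, either raise EXCP_DEBUG before the access (BP_STOP_BEFORE_ACCESS)
 * or regenerate the current TB (tb_gen_code with cflags 1) so the debug
 * exception is taken right after the access completes.
 */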
Peter Maydell66b9b432015-04-26 16:49:24 +01001940static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001941{
Andreas Färber93afead2013-08-26 03:41:01 +02001942 CPUState *cpu = current_cpu;
1943 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001944 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001945 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001946 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001947 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001948
Andreas Färberff4700b2013-08-26 18:23:18 +02001949 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001950 /* We re-entered the check after replacing the TB. Now raise
1951 * the debug interrupt so that it will trigger after the
1952 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001953 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001954 return;
1955 }
Andreas Färber93afead2013-08-26 03:41:01 +02001956 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001957 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001958 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1959 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001960 if (flags == BP_MEM_READ) {
1961 wp->flags |= BP_WATCHPOINT_HIT_READ;
1962 } else {
1963 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1964 }
1965 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001966 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001967 if (!cpu->watchpoint_hit) {
1968 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001969 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001970 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001971 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001972 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001973 } else {
1974 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001975 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001976 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001977 }
aliguori06d55cc2008-11-18 20:24:06 +00001978 }
aliguori6e140f22008-11-18 20:37:55 +00001979 } else {
1980 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001981 }
1982 }
1983}
1984
pbrook6658ffb2007-03-16 23:58:11 +00001985/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1986 so these check for a hit then pass through to the normal out-of-line
1987 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001988static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1989 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001990{
Peter Maydell66b9b432015-04-26 16:49:24 +01001991 MemTxResult res;
1992 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001993
Peter Maydell66b9b432015-04-26 16:49:24 +01001994 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001995 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001996 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01001997 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001998 break;
1999 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002000 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002001 break;
2002 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002003 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002004 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002005 default: abort();
2006 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002007 *pdata = data;
2008 return res;
2009}
2010
2011static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2012 uint64_t val, unsigned size,
2013 MemTxAttrs attrs)
2014{
2015 MemTxResult res;
2016
2017 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2018 switch (size) {
2019 case 1:
2020 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2021 break;
2022 case 2:
2023 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2024 break;
2025 case 4:
2026 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2027 break;
2028 default: abort();
2029 }
2030 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002031}
2032
Avi Kivity1ec9b902012-01-02 12:47:48 +02002033static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002034 .read_with_attrs = watch_mem_read,
2035 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002036 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002037};
pbrook6658ffb2007-03-16 23:58:11 +00002038
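/* Subpage dispatch: when several sections share one target page, the page is
 * fronted by a subpage_t whose handlers bounce the access back into the
 * owning AddressSpace at subpage->base + addr.
 */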
Peter Maydellf25a49e2015-04-26 16:49:24 +01002039static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2040 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002041{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002042 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002043 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002044 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002045
blueswir1db7b5422007-05-26 17:36:03 +00002046#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002047 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002048 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002049#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002050 res = address_space_read(subpage->as, addr + subpage->base,
2051 attrs, buf, len);
2052 if (res) {
2053 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002054 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002055 switch (len) {
2056 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002057 *data = ldub_p(buf);
2058 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002059 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002060 *data = lduw_p(buf);
2061 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002062 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002063 *data = ldl_p(buf);
2064 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002065 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002066 *data = ldq_p(buf);
2067 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002068 default:
2069 abort();
2070 }
blueswir1db7b5422007-05-26 17:36:03 +00002071}
2072
Peter Maydellf25a49e2015-04-26 16:49:24 +01002073static MemTxResult subpage_write(void *opaque, hwaddr addr,
2074 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002075{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002076 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002077 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002078
blueswir1db7b5422007-05-26 17:36:03 +00002079#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002080 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002081 " value %"PRIx64"\n",
2082 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002083#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002084 switch (len) {
2085 case 1:
2086 stb_p(buf, value);
2087 break;
2088 case 2:
2089 stw_p(buf, value);
2090 break;
2091 case 4:
2092 stl_p(buf, value);
2093 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002094 case 8:
2095 stq_p(buf, value);
2096 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002097 default:
2098 abort();
2099 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002100 return address_space_write(subpage->as, addr + subpage->base,
2101 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002102}
2103
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002104static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002105 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002106{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002107 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002108#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002109 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002110 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002111#endif
2112
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002113 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002114 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002115}
2116
Avi Kivity70c68e42012-01-02 12:32:48 +02002117static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002118 .read_with_attrs = subpage_read,
2119 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002120 .impl.min_access_size = 1,
2121 .impl.max_access_size = 8,
2122 .valid.min_access_size = 1,
2123 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002124 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002125 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002126};
2127
Anthony Liguoric227f092009-10-01 16:12:16 -05002128static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002129 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002130{
2131 int idx, eidx;
2132
2133 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
2134 return -1;
    }
2135 idx = SUBPAGE_IDX(start);
2136 eidx = SUBPAGE_IDX(end);
2137#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002138 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2139 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002140#endif
blueswir1db7b5422007-05-26 17:36:03 +00002141 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002142 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002143 }
2144
2145 return 0;
2146}
2147
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002148static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002149{
Anthony Liguoric227f092009-10-01 16:12:16 -05002150 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002151
Anthony Liguori7267c092011-08-20 22:09:37 -05002152 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002153
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002154 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002155 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002156 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002157 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002158 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002159#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002160 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2161 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002162#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002163 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002164
2165 return mmio;
2166}
2167
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002168static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2169 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002170{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002171 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002172 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002173 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002174 .mr = mr,
2175 .offset_within_address_space = 0,
2176 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002177 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002178 };
2179
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002180 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002181}
2182
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002183MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002184{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002185 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2186 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002187
2188 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002189}
2190
Avi Kivitye9179ce2009-06-14 11:38:52 +03002191static void io_mem_init(void)
2192{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002193 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002194 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002195 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002196 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002197 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002198 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002199 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002200}
2201
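/* Dispatch tables are rebuilt RCU-style: mem_begin() starts a fresh
 * AddressSpaceDispatch in as->next_dispatch, mem_add() populates it, and
 * mem_commit() publishes it with atomic_rcu_set(), reclaiming the old table
 * after a grace period.
 */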
Avi Kivityac1970f2012-10-03 16:22:53 +02002202static void mem_begin(MemoryListener *listener)
2203{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002204 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002205 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2206 uint16_t n;
2207
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002208 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002209 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002210 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002211 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002212 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002213 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002214 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002215 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002216
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002217 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002218 d->as = as;
2219 as->next_dispatch = d;
2220}
2221
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002222static void address_space_dispatch_free(AddressSpaceDispatch *d)
2223{
2224 phys_sections_free(&d->map);
2225 g_free(d);
2226}
2227
Paolo Bonzini00752702013-05-29 12:13:54 +02002228static void mem_commit(MemoryListener *listener)
2229{
2230 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002231 AddressSpaceDispatch *cur = as->dispatch;
2232 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002233
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002234 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002235
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002236 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002237 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002238 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002239 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002240}
2241
Avi Kivity1d711482012-10-02 18:54:45 +02002242static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002243{
Andreas Färber182735e2013-05-29 22:29:20 +02002244 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002245
2246 /* since each CPU stores ram addresses in its TLB cache, we must
2247 reset the modified entries */
2248 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002249 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002250 /* FIXME: Disentangle the cpu.h circular file deps so we can
2251 directly get the right CPU from the listener. */
2252 if (cpu->tcg_as_listener != listener) {
2253 continue;
2254 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002255 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002256 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002257}
2258
Avi Kivityac1970f2012-10-03 16:22:53 +02002259void address_space_init_dispatch(AddressSpace *as)
2260{
Paolo Bonzini00752702013-05-29 12:13:54 +02002261 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002262 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002263 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002264 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002265 .region_add = mem_add,
2266 .region_nop = mem_add,
2267 .priority = 0,
2268 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002269 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002270}
2271
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002272void address_space_unregister(AddressSpace *as)
2273{
2274 memory_listener_unregister(&as->dispatch_listener);
2275}
2276
Avi Kivity83f3c252012-10-07 12:59:55 +02002277void address_space_destroy_dispatch(AddressSpace *as)
2278{
2279 AddressSpaceDispatch *d = as->dispatch;
2280
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002281 atomic_rcu_set(&as->dispatch, NULL);
2282 if (d) {
2283 call_rcu(d, address_space_dispatch_free, rcu);
2284 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002285}
2286
Avi Kivity62152b82011-07-26 14:26:14 +03002287static void memory_map_init(void)
2288{
Anthony Liguori7267c092011-08-20 22:09:37 -05002289 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002290
Paolo Bonzini57271d62013-11-07 17:14:37 +01002291 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002292 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002293
Anthony Liguori7267c092011-08-20 22:09:37 -05002294 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002295 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2296 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002297 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002298}
2299
2300MemoryRegion *get_system_memory(void)
2301{
2302 return system_memory;
2303}
2304
Avi Kivity309cb472011-08-08 16:09:03 +03002305MemoryRegion *get_system_io(void)
2306{
2307 return system_io;
2308}
2309
pbrooke2eef172008-06-08 01:09:01 +00002310#endif /* !defined(CONFIG_USER_ONLY) */
2311
bellard13eb76e2004-01-24 15:23:36 +00002312/* physical memory access (slow version, mainly for debug) */
2313#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002314int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002315 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002316{
2317 int l, flags;
2318 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002319 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002320
2321 while (len > 0) {
2322 page = addr & TARGET_PAGE_MASK;
2323 l = (page + TARGET_PAGE_SIZE) - addr;
2324 if (l > len)
2325 l = len;
2326 flags = page_get_flags(page);
2327 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002328 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002329 if (is_write) {
2330 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002331 return -1;
bellard579a97f2007-11-11 14:26:47 +00002332 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002333 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002334 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002335 memcpy(p, buf, l);
2336 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002337 } else {
2338 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002339 return -1;
bellard579a97f2007-11-11 14:26:47 +00002340 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002341 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002342 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002343 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002344 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002345 }
2346 len -= l;
2347 buf += l;
2348 addr += l;
2349 }
Paul Brooka68fe892010-03-01 00:08:59 +00002350 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002351}
bellard8df1cd02005-01-28 22:37:22 +00002352
bellard13eb76e2004-01-24 15:23:36 +00002353#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002354
Paolo Bonzini845b6212015-03-23 11:45:53 +01002355static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002356 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002357{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002358 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2359 /* No early return if dirty_log_mask is or becomes 0, because
2360 * cpu_physical_memory_set_dirty_range will still call
2361 * xen_modified_memory.
2362 */
2363 if (dirty_log_mask) {
2364 dirty_log_mask =
2365 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002366 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002367 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2368 tb_invalidate_phys_range(addr, addr + length);
2369 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2370 }
2371 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002372}
2373
Richard Henderson23326162013-07-08 14:55:59 -07002374static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002375{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002376 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002377
2378 /* Regions are assumed to support 1-4 byte accesses unless
2379 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002380 if (access_size_max == 0) {
2381 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002382 }
Richard Henderson23326162013-07-08 14:55:59 -07002383
2384 /* Bound the maximum access by the alignment of the address. */
2385 if (!mr->ops->impl.unaligned) {
2386 unsigned align_size_max = addr & -addr;
2387 if (align_size_max != 0 && align_size_max < access_size_max) {
2388 access_size_max = align_size_max;
2389 }
2390 }
2391
2392 /* Don't attempt accesses larger than the maximum. */
2393 if (l > access_size_max) {
2394 l = access_size_max;
2395 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002396 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002397
2398 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002399}
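
/*
 * Worked example (illustrative, not part of the original source): for a
 * region with max_access_size = 8 and !impl.unaligned, a request of
 * l = 8 at addr = 0x1006 is bounded by the address alignment:
 * addr & -addr isolates the lowest set bit (2 here), so access_size_max
 * drops to 2 and the first chunk is a 2-byte access.  Independently,
 * pow2floor() rounds non-power-of-two lengths down:
 *
 *     unsigned align = 0x1006 & -0x1006;   // == 2
 *     unsigned l     = pow2floor(6);       // == 4
 */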

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}
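
/*
 * Example (illustrative sketch, not from the original file): a device
 * model reading a guest-physical buffer and checking the transaction
 * result.  The address and length below are hypothetical.
 *
 *     uint8_t data[64];
 *     MemTxResult res = address_space_read(&address_space_memory,
 *                                          0x40000000,
 *                                          MEMTXATTRS_UNSPECIFIED,
 *                                          data, sizeof(data));
 *     if (res != MEMTX_OK) {
 *         // handle a decode or device error on the bus
 *     }
 */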

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
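
/*
 * Example (illustrative, with a made-up blob and address): a firmware
 * loader would typically write the image through the ROM path and then
 * flush the host instruction cache for the KVM/Xen case:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
 *                                   blob, blob_len);
 *     cpu_flush_icache_range(0xfffc0000, blob_len);
 */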

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
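
/*
 * Example (illustrative sketch): a DMA helper that cannot map its range
 * because the bounce buffer is busy can register a bottom half to be
 * scheduled once the buffer frees up.  "dma_retry_bh" and "s" are
 * hypothetical names, not part of this file.
 *
 *     s->bh = qemu_bh_new(dma_retry_bh, s);
 *     void *p = address_space_map(s->as, s->addr, &s->len, s->is_write);
 *     if (!p) {
 *         cpu_register_map_client(s->bh);  // dma_retry_bh re-tries later
 *     }
 */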

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
                                bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* do not leak the RCU read lock on the error path */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
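
/*
 * Example (illustrative): probing whether a DMA window is fully backed
 * before committing to a transfer; the address and length are
 * hypothetical.
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     0x80000000, 4096, true)) {
 *         // reject the request instead of faulting mid-transfer
 *     }
 */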

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write,
                               access_len);
}
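
/*
 * Example (illustrative sketch of the map/unmap lifecycle): *plen may
 * come back smaller than requested, so callers loop or retry, and the
 * access_len passed to unmap should be what was actually touched.
 * "gpa" and "size" are hypothetical.
 *
 *     hwaddr len = size;
 *     void *p = cpu_physical_memory_map(gpa, &len, 1);   // 1 == is_write
 *     if (p) {
 *         memset(p, 0, len);                // use at most len bytes
 *         cpu_physical_memory_unmap(p, len, 1, len);
 *     }
 */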

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
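
/*
 * Example (illustrative): reading a 32-bit big-endian device register
 * regardless of host or guest endianness; the address is hypothetical.
 *
 *     MemTxResult res;
 *     uint32_t id = address_space_ldl_be(&address_space_memory,
 *                                        0x10001000,
 *                                        MEMTXATTRS_UNSPECIFIED, &res);
 */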

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
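
/*
 * Example (illustrative sketch, loosely modeled on how an x86-style MMU
 * helper updates accessed/dirty bits in a guest PTE without dirtying the
 * page or invalidating translated code; "pte_addr", "pte" and
 * PG_ACCESSED_MASK are assumptions from that context, not this file):
 *
 *     stl_phys_notdirty(cs->as, pte_addr, pte | PG_ACCESSED_MASK);
 */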

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
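
/*
 * Example (illustrative): a debugger-style caller such as the gdb stub
 * reads guest virtual memory with the debug accessor, which tolerates
 * ROM and reports unmapped pages; "pc" is a hypothetical guest address.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         // page not mapped: report an error to the debugger
 *     }
 */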
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
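
/*
 * Example (illustrative): walking every RAMBlock with a callback;
 * returning non-zero from the callback stops the walk.  "print_block"
 * is a hypothetical iterator matching the call above.
 *
 *     static int print_block(const char *idstr, void *host, ram_addr_t off,
 *                            ram_addr_t used, void *opaque)
 *     {
 *         printf("%s: host %p, %zu bytes\n", idstr, host, (size_t)used);
 *         return 0;  // keep iterating
 *     }
 *
 *     qemu_ram_foreach_block(print_block, NULL);
 */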
#endif