/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels (in units of L2_SIZE) to skip to reach the next
     * entry; 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
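/* For example, with ADDR_SPACE_BITS == 64 and 4 KiB target pages
 * (TARGET_PAGE_BITS == 12), this gives ((64 - 12 - 1) / 9) + 1 == 6
 * levels of 512-entry tables.
 */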

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

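/* Reserved indices into PhysPageMap.sections.  The dispatch setup code
 * is assumed to add the matching sections in exactly this order, so
 * iotlb values can name them directly without a tree lookup.
 */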
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

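/* Mark the range [*index, *index + *nb) of pages as pointing at section
 * `leaf`, descending from `level` and allocating intermediate nodes on
 * demand.  Runs that are aligned to this level's step and at least that
 * large terminate here; anything smaller recurses one level down.
 */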
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  Detect that the entry has a single
 * child, and update the entry so that lookups can skip it and go
 * directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

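/* Walk the radix tree for `addr`.  A non-leaf entry may stand in for
 * several levels at once (its `skip` field, see phys_page_compact()),
 * so `i` counts down by `skip` levels per iteration; a leaf has
 * skip == 0 and ends the walk.
 */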
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
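/* A typical caller holds the RCU read lock across both the translation
 * and the access that uses its result, along these lines:
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *     ... access mr at offset xlat, at most len bytes ...
 *     rcu_read_unlock();
 */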

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
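/* For example, a watchpoint covering the last four bytes of the address
 * space ends exactly at ~(vaddr)0; comparing inclusive end addresses
 * (wpend, addrend) avoids the wrap to zero that comparing `start + len`
 * would produce there.
 */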

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

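/* Dirty memory is tracked separately per client (e.g. DIRTY_MEMORY_VGA,
 * DIRTY_MEMORY_CODE, DIRTY_MEMORY_MIGRATION), each with its own bitmap
 * in ram_list.dirty_memory[].
 */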
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

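/* A MemoryRegionSection that is smaller than a target page, or not
 * page-aligned, cannot be represented in the page-granular radix tree.
 * Such sections are routed through a subpage_t, whose sub_section[]
 * table maps each byte offset within the page to a section number.
 */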
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001132#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001133
1134#include <sys/vfs.h>
1135
1136#define HUGETLBFS_MAGIC 0x958458f6
1137
Hu Taofc7a5802014-09-09 13:28:01 +08001138static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001139{
1140 struct statfs fs;
1141 int ret;
1142
1143 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001144 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001145 } while (ret != 0 && errno == EINTR);
1146
1147 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001148 error_setg_errno(errp, errno, "failed to get page size of file %s",
1149 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001150 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001151 }
1152
1153 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001154 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001155
1156 return fs.f_bsize;
1157}
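
/*
 * On hugetlbfs mounts, statfs() reports the huge page size backing the
 * mount in f_bsize, which is what the caller needs for alignment and
 * rounding.  A minimal usage sketch (the mount point is only an example):
 *
 *     Error *err = NULL;
 *     long hpagesize = gethugepagesize("/dev/hugepages", &err);
 *     if (err) {
 *         error_report("%s", error_get_pretty(err));
 *     }
 */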

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = ROUND_UP(memory, hpagesize);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
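
/*
 * The mkstemp()+unlink() pair above is the usual anonymous-backing-file
 * idiom: the file stays alive as long as the descriptor and the mapping
 * are open, but it has no name, so nothing is left behind in the
 * hugetlbfs mount if QEMU exits unexpectedly.  The descriptor is kept in
 * block->fd so the mapping can later be recreated (qemu_ram_remap()) or
 * torn down (reclaim_ramblock()).
 */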
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
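
/*
 * find_ram_offset() is a best-fit search over the unordered block list:
 * for each block's end, the closest following block start bounds a gap,
 * and the smallest gap that still fits the request wins.  The nested walk
 * is O(n^2), which is acceptable because RAM blocks are few and
 * allocation is rare.  For instance, with blocks at [0, 0x1000) and
 * [0x3000, 0x4000), a request for 0x2000 bytes returns offset 0x1000.
 */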

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP if the user doesn't want guest memory in the core dump */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
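
/*
 * A caller-side sketch (hypothetical names, for illustration): a block
 * created with qemu_ram_alloc_resizeable() can be grown later, e.g. when
 * an incoming migration stream announces a larger size:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_offset, announced_size, &err) < 0) {
 *         error_report("%s", error_get_pretty(err));
 *     }
 */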

static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}
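
/*
 * Keeping the list sorted from biggest to smallest means the linear
 * lookups elsewhere in this file tend to hit the large main-RAM block
 * first.  Publication follows the classic RCU pattern: the element is
 * linked with the RCU list primitives, then smp_wmb() orders the list
 * update before the ram_list.version bump that tells readers something
 * changed.
 */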

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

static
ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                   void (*resized)(const char*,
                                                   uint64_t length,
                                                   void *host),
                                   void *host, bool resizeable,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
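
/*
 * All three public allocators funnel into qemu_ram_alloc_internal().  A
 * minimal sketch of the resizeable variant (names are illustrative):
 *
 *     ram_addr_t offset = qemu_ram_alloc_resizeable(initial, max,
 *                                                   resized_cb, mr, &err);
 *
 * where resized_cb(idstr, new_length, host) lets the owning device react
 * when qemu_ram_resize() later changes the used length.
 */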

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
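
/*
 * Note the asymmetry above: the list manipulation happens under the
 * ramlist mutex, but freeing is deferred through g_free_rcu()/call_rcu()
 * because lockless readers may still be traversing ram_list.blocks; the
 * block's mapping, descriptor, or Xen cache entry is only reclaimed once
 * all such readers have left their RCU critical sections.
 */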

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead.  For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}
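
/*
 * A typical safe calling pattern, per the comment above (sketch only):
 *
 *     rcu_read_lock();
 *     uint8_t *host = qemu_get_ram_ptr(ram_addr);
 *     memcpy(host, src, len);   // or any other direct access
 *     rcu_read_unlock();
 *
 * Alternatively, hold a reference on the MemoryRegion that owns the
 * block for as long as the pointer is in use.
 */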

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    void *ptr;
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;
        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length) {
                    *size = block->max_length - addr + block->offset;
                }
                ptr = ramblock_ptr(block, addr - block->offset);
                rcu_read_unlock();
                return ptr;
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;
    MemoryRegion *mr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        mr = qemu_get_ram_block(*ram_addr)->mr;
        rcu_read_unlock();
        return mr;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case occurs when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    mr = block->mr;
    rcu_read_unlock();
    return mr;
}
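
/*
 * The atomic_rcu_read() of ram_list.mru_block above is a lock-free fast
 * path: the most recently used block (typically main guest RAM) answers
 * most host-pointer-to-ram_addr_t translations without walking the whole
 * list.  Writers clear mru_block under the ramlist mutex whenever the
 * list changes, so at worst the fast path misses and the full walk runs.
 */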

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
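
/*
 * This is the core of dirty tracking for TCG: pages whose dirty bits are
 * clean are mapped through this MemoryRegion instead of plain RAM, so
 * every guest store traps into notdirty_mem_write().  The handler
 * invalidates any translated code derived from the page, performs the
 * store, sets the dirty bits, and finally redirects the TLB entry back
 * to ordinary RAM so later stores run at full speed.
 */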

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
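
/*
 * Both handlers above are deliberately thin.  check_watchpoint() may not
 * return at all (it can exit to the CPU loop via cpu_loop_exit() or
 * cpu_resume_from_signal()); when it does return, the access is replayed
 * through the normal address_space load/store helpers so the usual
 * dispatch and MemTxResult propagation still apply.
 */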

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
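
/*
 * Subpages exist because the physical page table has target-page
 * granularity: when several MemoryRegionSections share one page, the
 * page maps to a subpage_t whose sub_section[] array resolves the offset
 * within the page.  The handlers above simply bounce the access back
 * into address_space_read()/address_space_write() at subpage->base + addr,
 * where the finer-grained section lookup takes over.
 */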

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
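
/*
 * mem_begin()/mem_commit() implement a copy-then-publish update: a fresh
 * AddressSpaceDispatch is built on the side while readers keep using the
 * current one, then atomic_rcu_set() swaps the pointer and the old table
 * is freed after a grace period.  Readers therefore always see a
 * complete, self-consistent map without taking any lock.
 */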

static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow! */
    CPU_FOREACH(cpu) {
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        cpu_reload_memory_map(cpu);
    }
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
2267
Avi Kivity62152b82011-07-26 14:26:14 +03002268static void memory_map_init(void)
2269{
Anthony Liguori7267c092011-08-20 22:09:37 -05002270 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002271
Paolo Bonzini57271d62013-11-07 17:14:37 +01002272 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002273 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002274
Anthony Liguori7267c092011-08-20 22:09:37 -05002275 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002276 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2277 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002278 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002279}
2280
2281MemoryRegion *get_system_memory(void)
2282{
2283 return system_memory;
2284}
2285
Avi Kivity309cb472011-08-08 16:09:03 +03002286MemoryRegion *get_system_io(void)
2287{
2288 return system_io;
2289}
2290
pbrooke2eef172008-06-08 01:09:01 +00002291#endif /* !defined(CONFIG_USER_ONLY) */
2292
bellard13eb76e2004-01-24 15:23:36 +00002293/* physical memory access (slow version, mainly for debug) */
2294#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002295int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002296 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002297{
2298 int l, flags;
2299 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002300 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002301
2302 while (len > 0) {
2303 page = addr & TARGET_PAGE_MASK;
2304 l = (page + TARGET_PAGE_SIZE) - addr;
2305 if (l > len)
2306 l = len;
2307 flags = page_get_flags(page);
2308 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002309 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002310 if (is_write) {
2311 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002312 return -1;
bellard579a97f2007-11-11 14:26:47 +00002313 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002314 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002315 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002316 memcpy(p, buf, l);
2317 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002318 } else {
2319 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002320 return -1;
bellard579a97f2007-11-11 14:26:47 +00002321 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002322 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002323 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002324 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002325 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002326 }
2327 len -= l;
2328 buf += l;
2329 addr += l;
2330 }
Paul Brooka68fe892010-03-01 00:08:59 +00002331 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002332}
bellard8df1cd02005-01-28 22:37:22 +00002333
bellard13eb76e2004-01-24 15:23:36 +00002334#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002335
Paolo Bonzini845b6212015-03-23 11:45:53 +01002336static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002337 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002338{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002339 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2340 /* No early return if dirty_log_mask is or becomes 0, because
2341 * cpu_physical_memory_set_dirty_range will still call
2342 * xen_modified_memory.
2343 */
2344 if (dirty_log_mask) {
2345 dirty_log_mask =
2346 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002347 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002348 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2349 tb_invalidate_phys_range(addr, addr + length);
2350 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2351 }
2352 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002353}
2354
Richard Henderson23326162013-07-08 14:55:59 -07002355static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002356{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002357 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002358
2359 /* Regions are assumed to support 1-4 byte accesses unless
2360 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002361 if (access_size_max == 0) {
2362 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002363 }
Richard Henderson23326162013-07-08 14:55:59 -07002364
2365 /* Bound the maximum access by the alignment of the address. */
2366 if (!mr->ops->impl.unaligned) {
2367 unsigned align_size_max = addr & -addr;
2368 if (align_size_max != 0 && align_size_max < access_size_max) {
2369 access_size_max = align_size_max;
2370 }
2371 }
2372
2373 /* Don't attempt accesses larger than the maximum. */
2374 if (l > access_size_max) {
2375 l = access_size_max;
2376 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002377 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002378
2379 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002380}
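
/* Worked example (illustrative comment, not compiled): for a region whose
 * ops declare valid.max_access_size == 4 and impl.unaligned == false, a
 * request of l == 8 at addr == 0x1006 is first capped to 4, then bounded by
 * the address alignment (0x1006 & -0x1006 == 2), and finally rounded down
 * to a power of two by pow2floor(), so the transfer proceeds in 2-byte
 * pieces until the address becomes better aligned.
 */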

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
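
/* Caller sketch (illustrative only): every MMIO dispatch below follows this
 * pattern.  prepare_mmio_access() takes the iothread lock when the region
 * needs global locking, and the caller drops it once the access completes:
 *
 *     bool release_lock = false;
 *     ...
 *     release_lock |= prepare_mmio_access(mr);
 *     memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
 *     ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */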

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}
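
/* Usage sketch (illustrative only; the buffer size and guest address are
 * made up): reading a descriptor from guest memory and checking for bus
 * errors via the accumulated MemTxResult:
 *
 *     uint8_t desc[16];
 *     MemTxResult res = address_space_read(&address_space_memory, 0x40000000,
 *                                          MEMTXATTRS_UNSPECIFIED,
 *                                          desc, sizeof(desc));
 *     if (res != MEMTX_OK) {
 *         ... report a DMA error to the guest ...
 *     }
 */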

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
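
/* Usage sketch (illustrative; the address, blob and blob_size are
 * hypothetical): a firmware loader can populate a ROM region this way,
 * which a plain address_space_write() would treat as a non-direct access:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
 *                                   blob, blob_size);
 */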

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
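
/* Retry sketch (illustrative only; my_dma_retry_bh and s are hypothetical
 * names): a device model whose address_space_map() call returned NULL
 * because the bounce buffer was busy can register a bottom half that is
 * scheduled once the buffer frees up:
 *
 *     static void my_dma_retry_bh(void *opaque)
 *     {
 *         ... retry the address_space_map() call ...
 *     }
 *     ...
 *     QEMUBH *bh = qemu_bh_new(my_dma_retry_bh, s);
 *     cpu_register_map_client(bh);
 */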
2649
Fam Zheng38e047b2015-03-16 17:03:35 +08002650void cpu_exec_init_all(void)
2651{
2652 qemu_mutex_init(&ram_list.mutex);
2653 memory_map_init();
2654 io_mem_init();
2655 qemu_mutex_init(&map_client_list_lock);
2656}
2657
Fam Zhenge95205e2015-03-16 17:03:37 +08002658void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002659{
Fam Zhenge95205e2015-03-16 17:03:37 +08002660 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002661
Fam Zhenge95205e2015-03-16 17:03:37 +08002662 qemu_mutex_lock(&map_client_list_lock);
2663 QLIST_FOREACH(client, &map_client_list, link) {
2664 if (client->bh == bh) {
2665 cpu_unregister_map_client_do(client);
2666 break;
2667 }
2668 }
2669 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002670}
2671
2672static void cpu_notify_map_clients(void)
2673{
Fam Zheng38e047b2015-03-16 17:03:35 +08002674 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002675 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002676 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002677}
2678
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002679bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2680{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002681 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002682 hwaddr l, xlat;
2683
Paolo Bonzini41063e12015-03-18 14:21:43 +01002684 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002685 while (len > 0) {
2686 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002687 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2688 if (!memory_access_is_direct(mr, is_write)) {
2689 l = memory_access_size(mr, l, addr);
2690 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002691 return false;
2692 }
2693 }
2694
2695 len -= l;
2696 addr += l;
2697 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002698 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002699 return true;
2700}
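
/* Usage sketch (illustrative; dma_addr and dma_len are hypothetical):
 * probing whether a DMA window is fully accessible before committing to
 * the transfer:
 *
 *     if (!address_space_access_valid(as, dma_addr, dma_len, true)) {
 *         ... raise a bus fault instead of writing ...
 *     }
 */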

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
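
/* Zero-copy DMA sketch (illustrative; fill_buffer, addr and size are
 * hypothetical): the canonical map/use/unmap sequence, remembering that
 * fewer bytes than requested may be mapped:
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, addr, &plen, true);
 *     if (p) {
 *         fill_buffer(p, plen);
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */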

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
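
/* Usage sketch (illustrative; desc_addr is hypothetical): a device reading
 * a little-endian 32-bit descriptor field with explicit error checking:
 *
 *     MemTxResult res;
 *     uint32_t next = address_space_ldl_le(as, desc_addr + 4,
 *                                          MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ...
 *     }
 *
 * The ldl_phys()/ldl_le_phys()/ldl_be_phys() wrappers above are the same
 * calls with unspecified attributes and the result discarded.
 */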

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
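
/* Usage sketch (illustrative; status_addr is hypothetical): the store side
 * mirrors the loads, e.g. writing a big-endian status word and ignoring the
 * transaction result:
 *
 *     address_space_stl_be(as, status_addr, 0x1, MEMTXATTRS_UNSPECIFIED,
 *                          NULL);
 *
 * address_space_stl_notdirty() above is the special case that masks out the
 * CODE dirty bit, for callers such as page-table walkers updating PTEs.
 */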

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
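
/* Callback sketch (illustrative; print_block is a hypothetical function,
 * and its signature is assumed to match the call made above):
 *
 *     static int print_block(const char *idstr, void *host_addr,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         printf("%s: host %p, %zu bytes\n", idstr, host_addr,
 *                (size_t)length);
 *         return 0;  // non-zero stops the iteration
 *     }
 *     ...
 *     qemu_ram_foreach_block(print_block, NULL);
 */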
#endif