/*
 * Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
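
/* Worked example (illustrative, assuming a 4 KiB target page, i.e.
 * TARGET_PAGE_BITS == 12): the tree must resolve 64 - 12 = 52 index
 * bits in 9-bit chunks, so P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6,
 * with each node holding P_L2_SIZE == 512 entries.
 */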

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
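
/* Usage sketch (illustrative only, with hypothetical `gpa` and `sec`
 * values): installing a 2 MiB mapping whose sections[] slot is `sec`
 * would look like
 *
 *     phys_page_set(d, gpa >> TARGET_PAGE_BITS,
 *                   (2 * 1024 * 1024) >> TARGET_PAGE_BITS, sec);
 *
 * The recursion above fills whole aligned P_L2_SIZE blocks at interior
 * levels and only descends where the range is unaligned or smaller
 * than a block.
 */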

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
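
/* Illustrative effect of compaction: when a node reached from a
 * skip == 1 entry has exactly one valid child entry, phys_page_compact()
 * rewrites the parent entry to point at the grandchild directly and adds
 * the skip counts (e.g. 1 + 1 == 2), so phys_page_find() drops two
 * levels in a single step instead of dereferencing the intermediate node.
 */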

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
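
/* Walkthrough (illustrative, assuming TARGET_PAGE_BITS == 12): for
 * addr == 0x12345000, index == 0x12345.  Starting from i == P_L2_LEVELS,
 * each iteration subtracts the current entry's skip count and selects
 * (index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1) within the node it points
 * to, until a leaf (skip == 0) yields a sections[] slot, which is then
 * bounds-checked with section_covers_addr() above.
 */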

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
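
/* Sketch of the loop above for a hypothetical two-stage setup: an access
 * that hits an IOMMU MemoryRegion is translated to an IOMMUTLBEntry, the
 * masked translated_addr is re-submitted to iotlb.target_as, and the
 * iteration repeats until a terminal (non-IOMMU) region is reached.  If a
 * required permission bit is missing, io_mem_unassigned is returned so
 * the access faults like any other unassigned access.
 */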

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
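
/* Usage sketch (hypothetical target code, not taken from this file):
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, &address_space_memory, 0);
 *     cpu_address_space_init(cpu, my_second_as, 1);
 *
 * `my_second_as` stands in for an AddressSpace the target creates
 * itself; index 0 doubles as the legacy cpu->as convenience alias.
 */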

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
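
/* Worked example (purely illustrative, pretending DIRTY_MEMORY_BLOCK_SIZE
 * were 1024): clearing a range with page == 1000 and end == 1200 first
 * touches block idx 0 at offset 1000 for num == 24 pages, then block
 * idx 1 at offset 0 for the remaining 176 pages.  A range can therefore
 * span several bitmap blocks, each updated atomically with no long-lived
 * lock held.
 */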

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
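
/* Encoding sketch (illustrative): for writable RAM the returned value is
 * the page-aligned ram_addr of the page with PHYS_SECTION_NOTDIRTY (== 1)
 * folded into its low bits, so the TLB fill path can recover both the
 * backing page and the "track dirtying" hint from a single hwaddr.
 * phys_section_add() below asserts sections_nb < TARGET_PAGE_SIZE
 * precisely so that section indexes never collide with the page-aligned
 * part.
 */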
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
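
/* Usage sketch (hypothetical accelerator code, not taken from this file):
 *
 *     static void *my_ram_alloc(size_t size, uint64_t *align)
 *     {
 *         // e.g. carve RAM out of a preallocated, specially-aligned pool
 *         return my_pool_alloc(size, align);
 *     }
 *     ...
 *     phys_mem_set_alloc(my_ram_alloc);
 *
 * `my_ram_alloc` and `my_pool_alloc` are placeholders; the only contract
 * is matching qemu_anon_ram_alloc's signature and honoring *align.
 */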

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001196 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001197 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001198 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001199 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001200 while (int128_ne(remain.size, now.size)) {
1201 remain.size = int128_sub(remain.size, now.size);
1202 remain.offset_within_address_space += int128_get64(now.size);
1203 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001204 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001205 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001206 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001207 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001208 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001209 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001210 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001211 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001212 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001213 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001214 }
1215}
1216
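/*
 * Worked example, not part of the original file: with 4 KiB target pages,
 * mem_add() splits a section at offset 0x1800 of size 0x3000 into a head
 * subpage [0x1800, 0x1fff], a multipage run [0x2000, 0x3fff] and a tail
 * subpage [0x4000, 0x47ff], so only the unaligned edges pay the subpage
 * dispatch cost.
 */
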
Sheng Yang62a27442010-01-26 19:21:16 +08001217void qemu_flush_coalesced_mmio_buffer(void)
1218{
1219 if (kvm_enabled())
1220 kvm_flush_coalesced_mmio_buffer();
1221}
1222
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001223void qemu_mutex_lock_ramlist(void)
1224{
1225 qemu_mutex_lock(&ram_list.mutex);
1226}
1227
1228void qemu_mutex_unlock_ramlist(void)
1229{
1230 qemu_mutex_unlock(&ram_list.mutex);
1231}
1232
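/*
 * Usage sketch, not part of the original file: the lock pair above guards
 * writers that modify ram_list, following the pattern used by
 * ram_block_add() below; pure readers walk the list under rcu_read_lock()
 * instead, as last_ram_offset() does.
 */
static void example_ram_list_writer(RAMBlock *new_block)
{
    qemu_mutex_lock_ramlist();
    QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    /* Write list before version, matching the readers' expectations. */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();
}
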
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001233#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001234static void *file_ram_alloc(RAMBlock *block,
1235 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001236 const char *path,
1237 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001238{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001239 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001240 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001241 char *sanitized_name;
1242 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001243 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001244 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001245 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001246
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001247 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1248 error_setg(errp,
1249 "host lacks kvm mmu notifiers, -mem-path unsupported");
1250 return NULL;
1251 }
1252
1253 for (;;) {
1254 fd = open(path, O_RDWR);
1255 if (fd >= 0) {
1256 /* @path names an existing file, use it */
1257 break;
1258 }
1259 if (errno == ENOENT) {
1260 /* @path names a file that doesn't exist, create it */
1261 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1262 if (fd >= 0) {
1263 unlink_on_error = true;
1264 break;
1265 }
1266 } else if (errno == EISDIR) {
1267 /* @path names a directory, create a file there */
1268 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1269 sanitized_name = g_strdup(memory_region_name(block->mr));
1270 for (c = sanitized_name; *c != '\0'; c++) {
1271 if (*c == '/') {
1272 *c = '_';
1273 }
1274 }
1275
1276 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1277 sanitized_name);
1278 g_free(sanitized_name);
1279
1280 fd = mkstemp(filename);
1281 if (fd >= 0) {
1282 unlink(filename);
1283 g_free(filename);
1284 break;
1285 }
1286 g_free(filename);
1287 }
1288 if (errno != EEXIST && errno != EINTR) {
1289 error_setg_errno(errp, errno,
1290 "can't open backing store %s for guest RAM",
1291 path);
1292 goto error;
1293 }
1294 /*
1295 * Try again on EINTR and EEXIST. The latter happens when
1296 * something else creates the file between our two open().
1297 */
1298 }
1299
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001300 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001301 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001302
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001303 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001304 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001305 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001306 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001307 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001308 }
1309
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001310 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001311
1312 /*
1313 * ftruncate is not supported by hugetlbfs in older
1314 * hosts, so don't bother bailing out on errors.
1315 * If anything goes wrong with it under other filesystems,
1316 * mmap will fail.
1317 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001318 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001319 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001320 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001321
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001322 area = qemu_ram_mmap(fd, memory, block->mr->align,
1323 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001324 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001325 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001326 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001327 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001328 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001329
1330 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001331 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001332 }
1333
Alex Williamson04b16652010-07-02 11:13:17 -06001334 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001335 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001336
1337error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001338 if (unlink_on_error) {
1339 unlink(path);
1340 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001341 if (fd != -1) {
1342 close(fd);
1343 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001344 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001345}
1346#endif
1347
Mike Day0dc3f442013-09-05 14:41:35 -04001348/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001349static ram_addr_t find_ram_offset(ram_addr_t size)
1350{
Alex Williamson04b16652010-07-02 11:13:17 -06001351 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001352 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001353
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001354 assert(size != 0); /* it would hand out the same offset multiple times */
1355
Mike Day0dc3f442013-09-05 14:41:35 -04001356 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001357 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001358 }
Alex Williamson04b16652010-07-02 11:13:17 -06001359
Mike Day0dc3f442013-09-05 14:41:35 -04001360 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001361 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001362
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001363 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001364
Mike Day0dc3f442013-09-05 14:41:35 -04001365 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001366 if (next_block->offset >= end) {
1367 next = MIN(next, next_block->offset);
1368 }
1369 }
1370 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001371 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001372 mingap = next - end;
1373 }
1374 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001375
1376 if (offset == RAM_ADDR_MAX) {
1377 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1378 (uint64_t)size);
1379 abort();
1380 }
1381
Alex Williamson04b16652010-07-02 11:13:17 -06001382 return offset;
1383}
1384
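/*
 * Worked example, not part of the original file: with blocks occupying
 * [0x0, 0x40000000) and [0x80000000, 0xc0000000), a request for
 * 0x10000000 bytes sees two candidate gaps: 0x40000000 bytes after the
 * first block and an unbounded one after the second.  The smallest gap
 * that still fits wins, so the new block lands at offset 0x40000000.
 */
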
Juan Quintela652d7ec2012-07-20 10:37:54 +02001385ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001386{
Alex Williamsond17b5282010-06-25 11:08:38 -06001387 RAMBlock *block;
1388 ram_addr_t last = 0;
1389
Mike Day0dc3f442013-09-05 14:41:35 -04001390 rcu_read_lock();
1391 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001392 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001393 }
Mike Day0dc3f442013-09-05 14:41:35 -04001394 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001395 return last;
1396}
1397
Jason Baronddb97f12012-08-02 15:44:16 -04001398static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1399{
1400 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001401
 1402 /* Use MADV_DONTDUMP, if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001403 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001404 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1405 if (ret) {
1406 perror("qemu_madvise");
1407 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1408 "but dump_guest_core=off specified\n");
1409 }
1410 }
1411}
1412
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001413const char *qemu_ram_get_idstr(RAMBlock *rb)
1414{
1415 return rb->idstr;
1416}
1417
Mike Dayae3a7042013-09-05 14:41:35 -04001418/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001419void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001420{
Gongleifa53a0e2016-05-10 10:04:59 +08001421 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001422
Avi Kivityc5705a72011-12-20 15:59:12 +02001423 assert(new_block);
1424 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001425
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001426 if (dev) {
1427 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001428 if (id) {
1429 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001430 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001431 }
1432 }
1433 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1434
Gongleiab0a9952016-05-10 10:05:00 +08001435 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001436 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001437 if (block != new_block &&
1438 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001439 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1440 new_block->idstr);
1441 abort();
1442 }
1443 }
Mike Day0dc3f442013-09-05 14:41:35 -04001444 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001445}
1446
Mike Dayae3a7042013-09-05 14:41:35 -04001447/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001448void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001449{
Mike Dayae3a7042013-09-05 14:41:35 -04001450 /* FIXME: arch_init.c assumes that this is not called throughout
1451 * migration. Ignore the problem since hot-unplug during migration
1452 * does not work anyway.
1453 */
Hu Tao20cfe882014-04-02 15:13:26 +08001454 if (block) {
1455 memset(block->idstr, 0, sizeof(block->idstr));
1456 }
1457}
1458
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001459static int memory_try_enable_merging(void *addr, size_t len)
1460{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001461 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001462 /* disabled by the user */
1463 return 0;
1464 }
1465
1466 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1467}
1468
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001469/* Only legal before guest might have detected the memory size: e.g. on
1470 * incoming migration, or right after reset.
1471 *
 1472 * As the memory core doesn't know how the memory is accessed, it is up
 1473 * to the resize callback to update device state and/or add assertions
 1474 * to detect misuse, if necessary.
1475 */
Gongleifa53a0e2016-05-10 10:04:59 +08001476int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001477{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001478 assert(block);
1479
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001480 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001481
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001482 if (block->used_length == newsize) {
1483 return 0;
1484 }
1485
1486 if (!(block->flags & RAM_RESIZEABLE)) {
1487 error_setg_errno(errp, EINVAL,
1488 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1489 " in != 0x" RAM_ADDR_FMT, block->idstr,
1490 newsize, block->used_length);
1491 return -EINVAL;
1492 }
1493
1494 if (block->max_length < newsize) {
1495 error_setg_errno(errp, EINVAL,
1496 "Length too large: %s: 0x" RAM_ADDR_FMT
1497 " > 0x" RAM_ADDR_FMT, block->idstr,
1498 newsize, block->max_length);
1499 return -EINVAL;
1500 }
1501
1502 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1503 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001504 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1505 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001506 memory_region_set_size(block->mr, newsize);
1507 if (block->resized) {
1508 block->resized(block->idstr, newsize, block->host);
1509 }
1510 return 0;
1511}
1512
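/*
 * Usage sketch, not part of the original file: growing a block that was
 * created with the RAM_RESIZEABLE flag, e.g. on incoming migration.  The
 * helper name is hypothetical; error_report_err() is the real API.
 */
static void example_grow_ram(RAMBlock *rb, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(rb, newsize, &err) < 0) {
        error_report_err(err);
    }
}
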
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001513/* Called with ram_list.mutex held */
1514static void dirty_memory_extend(ram_addr_t old_ram_size,
1515 ram_addr_t new_ram_size)
1516{
1517 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1518 DIRTY_MEMORY_BLOCK_SIZE);
1519 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1520 DIRTY_MEMORY_BLOCK_SIZE);
1521 int i;
1522
1523 /* Only need to extend if block count increased */
1524 if (new_num_blocks <= old_num_blocks) {
1525 return;
1526 }
1527
1528 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1529 DirtyMemoryBlocks *old_blocks;
1530 DirtyMemoryBlocks *new_blocks;
1531 int j;
1532
1533 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1534 new_blocks = g_malloc(sizeof(*new_blocks) +
1535 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1536
1537 if (old_num_blocks) {
1538 memcpy(new_blocks->blocks, old_blocks->blocks,
1539 old_num_blocks * sizeof(old_blocks->blocks[0]));
1540 }
1541
1542 for (j = old_num_blocks; j < new_num_blocks; j++) {
1543 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1544 }
1545
1546 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1547
1548 if (old_blocks) {
1549 g_free_rcu(old_blocks, rcu);
1550 }
1551 }
1552}
1553
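/*
 * Worked example, not part of the original file: sizes here are in target
 * pages.  Assuming DIRTY_MEMORY_BLOCK_SIZE of 256M bits per block (as in
 * current headers) and 4 KiB pages, one block covers 1 TiB of guest RAM,
 * so growing a guest from 1 GiB to 2 GiB leaves the block count at 1 and
 * this function returns early.
 */
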
Fam Zheng528f46a2016-03-01 14:18:18 +08001554static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001555{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001556 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001557 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001558 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001559 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001560
1561 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001562
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001563 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001564 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001565
1566 if (!new_block->host) {
1567 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001568 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001569 new_block->mr, &err);
1570 if (err) {
1571 error_propagate(errp, err);
1572 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001573 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001574 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001575 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001576 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001577 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001578 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001579 error_setg_errno(errp, errno,
1580 "cannot set up guest memory '%s'",
1581 memory_region_name(new_block->mr));
1582 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001583 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001584 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001585 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001586 }
1587 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001588
Li Zhijiandd631692015-07-02 20:18:06 +08001589 new_ram_size = MAX(old_ram_size,
1590 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1591 if (new_ram_size > old_ram_size) {
1592 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001593 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001594 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001595 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1596 * QLIST (which has an RCU-friendly variant) does not have insertion at
1597 * tail, so save the last element in last_block.
1598 */
Mike Day0dc3f442013-09-05 14:41:35 -04001599 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001600 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001601 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001602 break;
1603 }
1604 }
1605 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001606 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001607 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001608 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001609 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001610 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001611 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001612 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001613
Mike Day0dc3f442013-09-05 14:41:35 -04001614 /* Write list before version */
1615 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001616 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001617 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001618
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001619 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001620 new_block->used_length,
1621 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001622
Paolo Bonzinia904c912015-01-21 16:18:35 +01001623 if (new_block->host) {
1624 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1625 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1626 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1627 if (kvm_enabled()) {
1628 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1629 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001630 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001631}
1632
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001633#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001634RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1635 bool share, const char *mem_path,
1636 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001637{
1638 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001639 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001640
1641 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001642 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001643 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001644 }
1645
1646 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1647 /*
1648 * file_ram_alloc() needs to allocate just like
1649 * phys_mem_alloc, but we haven't bothered to provide
1650 * a hook there.
1651 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001652 error_setg(errp,
1653 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001654 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001655 }
1656
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001657 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001658 new_block = g_malloc0(sizeof(*new_block));
1659 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001660 new_block->used_length = size;
1661 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001662 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001663 new_block->host = file_ram_alloc(new_block, size,
1664 mem_path, errp);
1665 if (!new_block->host) {
1666 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001667 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001668 }
1669
Fam Zheng528f46a2016-03-01 14:18:18 +08001670 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001671 if (local_err) {
1672 g_free(new_block);
1673 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001674 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001675 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001676 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001677}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001678#endif
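
#ifdef __linux__
/*
 * Usage sketch, not part of the original file: backing a memory region
 * with a hugetlbfs mount, which is what -mem-path ends up doing.  The
 * mount point below is illustrative.
 */
static RAMBlock *example_alloc_hugepage_backed(MemoryRegion *mr,
                                               ram_addr_t size, Error **errp)
{
    return qemu_ram_alloc_from_file(size, mr, false /* share */,
                                    "/dev/hugepages", errp);
}
#endif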
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001679
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001680static
Fam Zheng528f46a2016-03-01 14:18:18 +08001681RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1682 void (*resized)(const char*,
1683 uint64_t length,
1684 void *host),
1685 void *host, bool resizeable,
1686 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001687{
1688 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001689 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001690
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001691 size = HOST_PAGE_ALIGN(size);
1692 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001693 new_block = g_malloc0(sizeof(*new_block));
1694 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001695 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001696 new_block->used_length = size;
1697 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001698 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001699 new_block->fd = -1;
1700 new_block->host = host;
1701 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001702 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001703 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001704 if (resizeable) {
1705 new_block->flags |= RAM_RESIZEABLE;
1706 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001707 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001708 if (local_err) {
1709 g_free(new_block);
1710 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001711 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001712 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001713 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001714}
1715
Fam Zheng528f46a2016-03-01 14:18:18 +08001716RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001717 MemoryRegion *mr, Error **errp)
1718{
1719 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1720}
1721
Fam Zheng528f46a2016-03-01 14:18:18 +08001722RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001723{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001724 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1725}
1726
Fam Zheng528f46a2016-03-01 14:18:18 +08001727RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001728 void (*resized)(const char*,
1729 uint64_t length,
1730 void *host),
1731 MemoryRegion *mr, Error **errp)
1732{
1733 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001734}
bellarde9a1ab12007-02-08 23:08:38 +00001735
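/*
 * Usage sketch, not part of the original file: the resized callback passed
 * to qemu_ram_alloc_resizeable() lets the owner react when qemu_ram_resize()
 * grows the block.  Names prefixed example_ are hypothetical.
 */
static void example_resized_cb(const char *idstr, uint64_t length, void *host)
{
    /* e.g. update device state that mirrors the RAM size */
}

static RAMBlock *example_alloc_resizeable(MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_resizeable(16 * 1024 * 1024, 64 * 1024 * 1024,
                                     example_resized_cb, mr, errp);
}
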
Paolo Bonzini43771532013-09-09 17:58:40 +02001736static void reclaim_ramblock(RAMBlock *block)
1737{
1738 if (block->flags & RAM_PREALLOC) {
1739 ;
1740 } else if (xen_enabled()) {
1741 xen_invalidate_map_cache_entry(block->host);
1742#ifndef _WIN32
1743 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001744 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001745 close(block->fd);
1746#endif
1747 } else {
1748 qemu_anon_ram_free(block->host, block->max_length);
1749 }
1750 g_free(block);
1751}
1752
Fam Zhengf1060c52016-03-01 14:18:22 +08001753void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001754{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001755 if (!block) {
1756 return;
1757 }
1758
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001759 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001760 QLIST_REMOVE_RCU(block, next);
1761 ram_list.mru_block = NULL;
1762 /* Write list before version */
1763 smp_wmb();
1764 ram_list.version++;
1765 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001766 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001767}
1768
Huang Yingcd19cfa2011-03-02 08:56:19 +01001769#ifndef _WIN32
1770void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1771{
1772 RAMBlock *block;
1773 ram_addr_t offset;
1774 int flags;
1775 void *area, *vaddr;
1776
Mike Day0dc3f442013-09-05 14:41:35 -04001777 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001778 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001779 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001780 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001781 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001782 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001783 } else if (xen_enabled()) {
1784 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001785 } else {
1786 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001787 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001788 flags |= (block->flags & RAM_SHARED ?
1789 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001790 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1791 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001792 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001793 /*
1794 * Remap needs to match alloc. Accelerators that
1795 * set phys_mem_alloc never remap. If they did,
1796 * we'd need a remap hook here.
1797 */
1798 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1799
Huang Yingcd19cfa2011-03-02 08:56:19 +01001800 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1801 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1802 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001803 }
1804 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001805 fprintf(stderr, "Could not remap addr: "
1806 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001807 length, addr);
1808 exit(1);
1809 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001810 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001811 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001812 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001813 }
1814 }
1815}
1816#endif /* !_WIN32 */
1817
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001818/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001819 * This should not be used for general purpose DMA. Use address_space_map
1820 * or address_space_rw instead. For local memory (e.g. video ram) that the
1821 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001822 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001823 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001824 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001825void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001826{
Gonglei3655cb92016-02-20 10:35:20 +08001827 RAMBlock *block = ram_block;
1828
1829 if (block == NULL) {
1830 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001831 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001832 }
Mike Dayae3a7042013-09-05 14:41:35 -04001833
1834 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001835 /* We need to check if the requested address is in RAM
1836 * because we don't want to map the entire memory in QEMU.
1837 * In that case just map until the end of the page.
1838 */
1839 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001840 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001841 }
Mike Dayae3a7042013-09-05 14:41:35 -04001842
1843 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001844 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001845 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001846}
1847
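/*
 * Usage sketch, not part of the original file: the pointer returned by
 * qemu_map_ram_ptr() is only stable inside an RCU critical section (or
 * while holding another reference that pins the block).
 */
static uint8_t example_read_ram_byte(RAMBlock *rb, ram_addr_t addr)
{
    uint8_t val;

    rcu_read_lock();
    val = *(uint8_t *)qemu_map_ram_ptr(rb, addr);
    rcu_read_unlock();
    return val;
}
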
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001848/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001849 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001850 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001851 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001852 */
Gonglei3655cb92016-02-20 10:35:20 +08001853static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1854 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001855{
Gonglei3655cb92016-02-20 10:35:20 +08001856 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001857 if (*size == 0) {
1858 return NULL;
1859 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001860
Gonglei3655cb92016-02-20 10:35:20 +08001861 if (block == NULL) {
1862 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001863 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001864 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001865 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001866
1867 if (xen_enabled() && block->host == NULL) {
 1868 /* We need to check if the requested address is in RAM
1869 * because we don't want to map the entire memory in QEMU.
1870 * In that case just map the requested area.
1871 */
1872 if (block->offset == 0) {
1873 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001874 }
1875
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001876 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001877 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001878
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001879 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001880}
1881
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001882/*
 1883 * Translates a host ptr back to a RAMBlock and an offset in that
 1884 * RAMBlock.
 1885 *
 1886 * ptr: Host pointer to look up
 1887 * round_offset: If true round the result offset down to a page boundary
 1888 * *offset: set to result offset within the RAMBlock
1890 *
1891 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001892 *
1893 * By the time this function returns, the returned pointer is not protected
1894 * by RCU anymore. If the caller is not within an RCU critical section and
1895 * does not hold the iothread lock, it must have other means of protecting the
1896 * pointer, such as a reference to the region that includes the incoming
1897 * ram_addr_t.
1898 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001899RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001900 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001901{
pbrook94a6b542009-04-11 17:15:54 +00001902 RAMBlock *block;
1903 uint8_t *host = ptr;
1904
Jan Kiszka868bb332011-06-21 22:59:09 +02001905 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001906 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001907 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001908 ram_addr = xen_ram_addr_from_mapcache(ptr);
1909 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001910 if (block) {
1911 *offset = (host - block->host);
1912 }
Mike Day0dc3f442013-09-05 14:41:35 -04001913 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001914 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001915 }
1916
Mike Day0dc3f442013-09-05 14:41:35 -04001917 rcu_read_lock();
1918 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001919 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001920 goto found;
1921 }
1922
Mike Day0dc3f442013-09-05 14:41:35 -04001923 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001924 /* This case happens when the block is not mapped. */
1925 if (block->host == NULL) {
1926 continue;
1927 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001928 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001929 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001930 }
pbrook94a6b542009-04-11 17:15:54 +00001931 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001932
Mike Day0dc3f442013-09-05 14:41:35 -04001933 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001934 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001935
1936found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001937 *offset = (host - block->host);
1938 if (round_offset) {
1939 *offset &= TARGET_PAGE_MASK;
1940 }
Mike Day0dc3f442013-09-05 14:41:35 -04001941 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001942 return block;
1943}
1944
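/*
 * Usage sketch, not part of the original file: mapping a host pointer that
 * is known to point into guest RAM back to its block and page-aligned
 * offset.
 */
static void example_host_ptr_lookup(void *host_ptr)
{
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true, &offset);

    if (rb) {
        /* offset is page aligned here because round_offset was true */
    }
}
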
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001945/*
1946 * Finds the named RAMBlock
1947 *
1948 * name: The name of RAMBlock to find
1949 *
1950 * Returns: RAMBlock (or NULL if not found)
1951 */
1952RAMBlock *qemu_ram_block_by_name(const char *name)
1953{
1954 RAMBlock *block;
1955
1956 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1957 if (!strcmp(name, block->idstr)) {
1958 return block;
1959 }
1960 }
1961
1962 return NULL;
1963}
1964
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001965/* Some of the softmmu routines need to translate from a host pointer
1966 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001967ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001968{
1969 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001970 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001971
Paolo Bonzinif615f392016-05-26 10:07:50 +02001972 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001973 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001974 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001975 }
1976
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001977 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001978}
Alex Williamsonf471a172010-06-11 11:11:42 -06001979
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001980/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001981static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001982 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001983{
Juan Quintela52159192013-10-08 12:44:04 +02001984 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001985 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001986 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001987 switch (size) {
1988 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001989 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001990 break;
1991 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001992 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001993 break;
1994 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001995 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001996 break;
1997 default:
1998 abort();
1999 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002000 /* Set both VGA and migration bits for simplicity and to remove
2001 * the notdirty callback faster.
2002 */
2003 cpu_physical_memory_set_dirty_range(ram_addr, size,
2004 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002005 /* we remove the notdirty callback only if the code has been
2006 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002007 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002008 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002009 }
bellard1ccde1c2004-02-06 19:46:14 +00002010}
2011
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002012static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2013 unsigned size, bool is_write)
2014{
2015 return is_write;
2016}
2017
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002018static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002019 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002020 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002021 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002022};
2023
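/*
 * Sketch, not part of the original file: after a write funnelled through
 * notdirty_mem_write() above, each client sees its dirty bit set until it
 * tests and clears it; the VGA code, for example, effectively asks:
 */
static bool example_vga_page_is_dirty(ram_addr_t addr)
{
    return cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
}
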
pbrook0f459d12008-06-09 00:20:13 +00002024/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002025static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002026{
Andreas Färber93afead2013-08-26 03:41:01 +02002027 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002028 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002029 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002030 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002031 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002032 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002033 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002034
Andreas Färberff4700b2013-08-26 18:23:18 +02002035 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002036 /* We re-entered the check after replacing the TB. Now raise
 2037 * the debug interrupt so that it will trigger after the
2038 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002039 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002040 return;
2041 }
Andreas Färber93afead2013-08-26 03:41:01 +02002042 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002043 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002044 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2045 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002046 if (flags == BP_MEM_READ) {
2047 wp->flags |= BP_WATCHPOINT_HIT_READ;
2048 } else {
2049 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2050 }
2051 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002052 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002053 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002054 if (wp->flags & BP_CPU &&
2055 !cc->debug_check_watchpoint(cpu, wp)) {
2056 wp->flags &= ~BP_WATCHPOINT_HIT;
2057 continue;
2058 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002059 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002060 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002061 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002062 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002063 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002064 } else {
2065 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002066 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002067 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002068 }
aliguori06d55cc2008-11-18 20:24:06 +00002069 }
aliguori6e140f22008-11-18 20:37:55 +00002070 } else {
2071 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002072 }
2073 }
2074}
2075
pbrook6658ffb2007-03-16 23:58:11 +00002076/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2077 so these check for a hit then pass through to the normal out-of-line
2078 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002079static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2080 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002081{
Peter Maydell66b9b432015-04-26 16:49:24 +01002082 MemTxResult res;
2083 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002084 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2085 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002086
Peter Maydell66b9b432015-04-26 16:49:24 +01002087 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002088 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002089 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002090 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002091 break;
2092 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002093 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002094 break;
2095 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002096 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002097 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002098 default: abort();
2099 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002100 *pdata = data;
2101 return res;
2102}
2103
2104static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2105 uint64_t val, unsigned size,
2106 MemTxAttrs attrs)
2107{
2108 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002109 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2110 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002111
2112 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2113 switch (size) {
2114 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002115 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002116 break;
2117 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002118 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002119 break;
2120 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002121 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002122 break;
2123 default: abort();
2124 }
2125 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002126}
2127
Avi Kivity1ec9b902012-01-02 12:47:48 +02002128static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002129 .read_with_attrs = watch_mem_read,
2130 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002131 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002132};
pbrook6658ffb2007-03-16 23:58:11 +00002133
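/*
 * Usage sketch, not part of the original file: watch_mem_ops above only
 * fires for addresses covered by a watchpoint, inserted e.g. through the
 * real cpu_watchpoint_insert() API; the wrapper name is hypothetical.
 */
static void example_watch_guest_addr(CPUState *cpu, vaddr addr)
{
    /* Trap writes to a 4-byte range on behalf of the gdbstub. */
    cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
}
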
Peter Maydellf25a49e2015-04-26 16:49:24 +01002134static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2135 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002136{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002137 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002138 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002139 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002140
blueswir1db7b5422007-05-26 17:36:03 +00002141#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002142 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002143 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002144#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002145 res = address_space_read(subpage->as, addr + subpage->base,
2146 attrs, buf, len);
2147 if (res) {
2148 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002149 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002150 switch (len) {
2151 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002152 *data = ldub_p(buf);
2153 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002154 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002155 *data = lduw_p(buf);
2156 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002157 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002158 *data = ldl_p(buf);
2159 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002160 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002161 *data = ldq_p(buf);
2162 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002163 default:
2164 abort();
2165 }
blueswir1db7b5422007-05-26 17:36:03 +00002166}
2167
Peter Maydellf25a49e2015-04-26 16:49:24 +01002168static MemTxResult subpage_write(void *opaque, hwaddr addr,
2169 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002170{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002171 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002172 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002173
blueswir1db7b5422007-05-26 17:36:03 +00002174#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002175 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002176 " value %"PRIx64"\n",
2177 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002178#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002179 switch (len) {
2180 case 1:
2181 stb_p(buf, value);
2182 break;
2183 case 2:
2184 stw_p(buf, value);
2185 break;
2186 case 4:
2187 stl_p(buf, value);
2188 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002189 case 8:
2190 stq_p(buf, value);
2191 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002192 default:
2193 abort();
2194 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002195 return address_space_write(subpage->as, addr + subpage->base,
2196 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002197}
2198
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002199static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002200 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002201{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002202 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002203#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002204 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002205 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002206#endif
2207
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002208 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002209 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002210}
2211
Avi Kivity70c68e42012-01-02 12:32:48 +02002212static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002213 .read_with_attrs = subpage_read,
2214 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002215 .impl.min_access_size = 1,
2216 .impl.max_access_size = 8,
2217 .valid.min_access_size = 1,
2218 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002219 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002220 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002221};
2222
Anthony Liguoric227f092009-10-01 16:12:16 -05002223static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002224 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002225{
2226 int idx, eidx;
2227
2228 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2229 return -1;
2230 idx = SUBPAGE_IDX(start);
2231 eidx = SUBPAGE_IDX(end);
2232#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002233 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2234 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002235#endif
blueswir1db7b5422007-05-26 17:36:03 +00002236 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002237 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002238 }
2239
2240 return 0;
2241}
2242
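/*
 * Worked example, not part of the original file: with SUBPAGE_IDX mapping
 * a byte offset within the page to its slot, registering [0x200, 0x3ff]
 * fills sub_section[SUBPAGE_IDX(0x200)] through sub_section[SUBPAGE_IDX(0x3ff)]
 * with the given section, so any access inside that slice of the page is
 * dispatched to it.
 */
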
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002243static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002244{
Anthony Liguoric227f092009-10-01 16:12:16 -05002245 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002246
Anthony Liguori7267c092011-08-20 22:09:37 -05002247 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002248
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002249 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002250 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002251 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002252 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002253 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002254#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002255 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2256 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002257#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002258 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002259
2260 return mmio;
2261}
2262
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002263static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2264 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002265{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002266 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002267 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002268 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002269 .mr = mr,
2270 .offset_within_address_space = 0,
2271 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002272 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002273 };
2274
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002275 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002276}
2277
Peter Maydella54c87b2016-01-21 14:15:05 +00002278MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002279{
Peter Maydella54c87b2016-01-21 14:15:05 +00002280 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2281 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002282 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002283 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002284
2285 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002286}
2287
Avi Kivitye9179ce2009-06-14 11:38:52 +03002288static void io_mem_init(void)
2289{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002290 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002291 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002292 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002293 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002294 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002295 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002296 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002297}
2298
Avi Kivityac1970f2012-10-03 16:22:53 +02002299static void mem_begin(MemoryListener *listener)
2300{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002301 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002302 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2303 uint16_t n;
2304
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002305 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002306 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002307 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002308 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002309 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002310 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002311 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002312 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002313
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002314 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002315 d->as = as;
2316 as->next_dispatch = d;
2317}
2318
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002319static void address_space_dispatch_free(AddressSpaceDispatch *d)
2320{
2321 phys_sections_free(&d->map);
2322 g_free(d);
2323}
2324
Paolo Bonzini00752702013-05-29 12:13:54 +02002325static void mem_commit(MemoryListener *listener)
2326{
2327 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002328 AddressSpaceDispatch *cur = as->dispatch;
2329 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002330
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002331 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002332
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002333 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002334 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002335 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002336 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002337}
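
/* Sketch (assumed usage, not from the original file): readers of as->dispatch
 * must stay inside an RCU critical section for as long as they dereference
 * the tree, because mem_commit() frees the superseded tree with call_rcu(). */
static uint32_t example_dispatch_root(AddressSpace *as)
{
    AddressSpaceDispatch *d;
    uint32_t root;

    rcu_read_lock();
    d = atomic_rcu_read(&as->dispatch);
    root = d->phys_map.ptr;     /* only valid inside the critical section */
    rcu_read_unlock();
    return root;
}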
2338
Avi Kivity1d711482012-10-02 18:54:45 +02002339static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002340{
Peter Maydell32857f42015-10-01 15:29:50 +01002341 CPUAddressSpace *cpuas;
2342 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002343
2344 /* since each CPU stores ram addresses in its TLB cache, we must
2345 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002346 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2347 cpu_reloading_memory_map();
2348 /* The CPU and TLB are protected by the iothread lock.
2349 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2350 * may have split the RCU critical section.
2351 */
2352 d = atomic_rcu_read(&cpuas->as->dispatch);
2353 cpuas->memory_dispatch = d;
2354 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002355}
2356
Avi Kivityac1970f2012-10-03 16:22:53 +02002357void address_space_init_dispatch(AddressSpace *as)
2358{
Paolo Bonzini00752702013-05-29 12:13:54 +02002359 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002360 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002361 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002362 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002363 .region_add = mem_add,
2364 .region_nop = mem_add,
2365 .priority = 0,
2366 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002367 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002368}
2369
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002370void address_space_unregister(AddressSpace *as)
2371{
2372 memory_listener_unregister(&as->dispatch_listener);
2373}
2374
Avi Kivity83f3c252012-10-07 12:59:55 +02002375void address_space_destroy_dispatch(AddressSpace *as)
2376{
2377 AddressSpaceDispatch *d = as->dispatch;
2378
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002379 atomic_rcu_set(&as->dispatch, NULL);
2380 if (d) {
2381 call_rcu(d, address_space_dispatch_free, rcu);
2382 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002383}
2384
Avi Kivity62152b82011-07-26 14:26:14 +03002385static void memory_map_init(void)
2386{
Anthony Liguori7267c092011-08-20 22:09:37 -05002387 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002388
Paolo Bonzini57271d62013-11-07 17:14:37 +01002389 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002390 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002391
Anthony Liguori7267c092011-08-20 22:09:37 -05002392 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002393 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2394 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002395 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002396}
2397
2398MemoryRegion *get_system_memory(void)
2399{
2400 return system_memory;
2401}
2402
Avi Kivity309cb472011-08-08 16:09:03 +03002403MemoryRegion *get_system_io(void)
2404{
2405 return system_io;
2406}
2407
pbrooke2eef172008-06-08 01:09:01 +00002408#endif /* !defined(CONFIG_USER_ONLY) */
2409
bellard13eb76e2004-01-24 15:23:36 +00002410/* physical memory access (slow version, mainly for debug) */
2411#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002412int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002413 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002414{
2415 int l, flags;
2416 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002417 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002418
2419 while (len > 0) {
2420 page = addr & TARGET_PAGE_MASK;
2421 l = (page + TARGET_PAGE_SIZE) - addr;
2422 if (l > len)
2423 l = len;
2424 flags = page_get_flags(page);
2425 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002426 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002427 if (is_write) {
2428 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002429 return -1;
bellard579a97f2007-11-11 14:26:47 +00002430 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002431 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002432 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002433 memcpy(p, buf, l);
2434 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002435 } else {
2436 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002437 return -1;
bellard579a97f2007-11-11 14:26:47 +00002438 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002439 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002440 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002441 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002442 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002443 }
2444 len -= l;
2445 buf += l;
2446 addr += l;
2447 }
Paul Brooka68fe892010-03-01 00:08:59 +00002448 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002449}
bellard8df1cd02005-01-28 22:37:22 +00002450
bellard13eb76e2004-01-24 15:23:36 +00002451#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002452
Paolo Bonzini845b6212015-03-23 11:45:53 +01002453static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002454 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002455{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002456 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002457 addr += memory_region_get_ram_addr(mr);
2458
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002459 /* No early return if dirty_log_mask is or becomes 0, because
2460 * cpu_physical_memory_set_dirty_range will still call
2461 * xen_modified_memory.
2462 */
2463 if (dirty_log_mask) {
2464 dirty_log_mask =
2465 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002466 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002467 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2468 tb_invalidate_phys_range(addr, addr + length);
2469 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2470 }
2471 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002472}
2473
Richard Henderson23326162013-07-08 14:55:59 -07002474static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002475{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002476 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002477
2478 /* Regions are assumed to support 1-4 byte accesses unless
2479 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002480 if (access_size_max == 0) {
2481 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002482 }
Richard Henderson23326162013-07-08 14:55:59 -07002483
2484 /* Bound the maximum access by the alignment of the address. */
2485 if (!mr->ops->impl.unaligned) {
2486 unsigned align_size_max = addr & -addr;
2487 if (align_size_max != 0 && align_size_max < access_size_max) {
2488 access_size_max = align_size_max;
2489 }
2490 }
2491
2492 /* Don't attempt accesses larger than the maximum. */
2493 if (l > access_size_max) {
2494 l = access_size_max;
2495 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002496 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002497
2498 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002499}
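
/* Worked example (illustrative, not from the original file): with
 * valid.max_access_size == 8 and no unaligned support, a 7-byte access at
 * address 0x1006 is clipped to 2 bytes: addr & -addr isolates the lowest
 * set address bit (0x2), which bounds the size, and pow2floor() keeps the
 * result a power of two. */
static unsigned example_access_size(void)
{
    hwaddr addr = 0x1006;
    unsigned l = 7;
    unsigned align = addr & -addr;      /* == 0x2 */

    if (align != 0 && align < l) {
        l = align;
    }
    return pow2floor(l);                /* == 2 */
}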
2500
Jan Kiszka4840f102015-06-18 18:47:22 +02002501static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002502{
Jan Kiszka4840f102015-06-18 18:47:22 +02002503 bool unlocked = !qemu_mutex_iothread_locked();
2504 bool release_lock = false;
2505
2506 if (unlocked && mr->global_locking) {
2507 qemu_mutex_lock_iothread();
2508 unlocked = false;
2509 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002510 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002511 if (mr->flush_coalesced_mmio) {
2512 if (unlocked) {
2513 qemu_mutex_lock_iothread();
2514 }
2515 qemu_flush_coalesced_mmio_buffer();
2516 if (unlocked) {
2517 qemu_mutex_unlock_iothread();
2518 }
2519 }
2520
2521 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002522}
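
/* Usage sketch (assumed, mirroring the callers below): take whatever locks
 * the region requires before dispatching, and release the global lock
 * afterwards only if this call path was the one that acquired it. */
static MemTxResult example_mmio_write(MemoryRegion *mr, hwaddr addr,
                                      uint64_t val, MemTxAttrs attrs)
{
    bool release_lock = prepare_mmio_access(mr);
    MemTxResult r = memory_region_dispatch_write(mr, addr, val, 4, attrs);

    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    return r;
}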
2523
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002524/* Called within RCU critical section. */
2525static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2526 MemTxAttrs attrs,
2527 const uint8_t *buf,
2528 int len, hwaddr addr1,
2529 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002530{
bellard13eb76e2004-01-24 15:23:36 +00002531 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002532 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002533 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002534 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002535
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002536 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002537 if (!memory_access_is_direct(mr, true)) {
2538 release_lock |= prepare_mmio_access(mr);
2539 l = memory_access_size(mr, l, addr1);
2540 /* XXX: could force current_cpu to NULL to avoid
2541 potential bugs */
2542 switch (l) {
2543 case 8:
2544 /* 64 bit write access */
2545 val = ldq_p(buf);
2546 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2547 attrs);
2548 break;
2549 case 4:
2550 /* 32 bit write access */
2551 val = ldl_p(buf);
2552 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2553 attrs);
2554 break;
2555 case 2:
2556 /* 16 bit write access */
2557 val = lduw_p(buf);
2558 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2559 attrs);
2560 break;
2561 case 1:
2562 /* 8 bit write access */
2563 val = ldub_p(buf);
2564 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2565 attrs);
2566 break;
2567 default:
2568 abort();
bellard13eb76e2004-01-24 15:23:36 +00002569 }
2570 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002571 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002572 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002573 memcpy(ptr, buf, l);
2574 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002575 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002576
2577 if (release_lock) {
2578 qemu_mutex_unlock_iothread();
2579 release_lock = false;
2580 }
2581
bellard13eb76e2004-01-24 15:23:36 +00002582 len -= l;
2583 buf += l;
2584 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002585
2586 if (!len) {
2587 break;
2588 }
2589
2590 l = len;
2591 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002592 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002593
Peter Maydell3b643492015-04-26 16:49:23 +01002594 return result;
bellard13eb76e2004-01-24 15:23:36 +00002595}
bellard8df1cd02005-01-28 22:37:22 +00002596
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002597MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2598 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002599{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002600 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002601 hwaddr addr1;
2602 MemoryRegion *mr;
2603 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002604
2605 if (len > 0) {
2606 rcu_read_lock();
2607 l = len;
2608 mr = address_space_translate(as, addr, &addr1, &l, true);
2609 result = address_space_write_continue(as, addr, attrs, buf, len,
2610 addr1, l, mr);
2611 rcu_read_unlock();
2612 }
2613
2614 return result;
2615}
2616
2617/* Called within RCU critical section. */
2618MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2619 MemTxAttrs attrs, uint8_t *buf,
2620 int len, hwaddr addr1, hwaddr l,
2621 MemoryRegion *mr)
2622{
2623 uint8_t *ptr;
2624 uint64_t val;
2625 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002626 bool release_lock = false;
2627
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002628 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002629 if (!memory_access_is_direct(mr, false)) {
2630 /* I/O case */
2631 release_lock |= prepare_mmio_access(mr);
2632 l = memory_access_size(mr, l, addr1);
2633 switch (l) {
2634 case 8:
2635 /* 64 bit read access */
2636 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2637 attrs);
2638 stq_p(buf, val);
2639 break;
2640 case 4:
2641 /* 32 bit read access */
2642 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2643 attrs);
2644 stl_p(buf, val);
2645 break;
2646 case 2:
2647 /* 16 bit read access */
2648 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2649 attrs);
2650 stw_p(buf, val);
2651 break;
2652 case 1:
2653 /* 8 bit read access */
2654 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2655 attrs);
2656 stb_p(buf, val);
2657 break;
2658 default:
2659 abort();
2660 }
2661 } else {
2662 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002663 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002664 memcpy(buf, ptr, l);
2665 }
2666
2667 if (release_lock) {
2668 qemu_mutex_unlock_iothread();
2669 release_lock = false;
2670 }
2671
2672 len -= l;
2673 buf += l;
2674 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002675
2676 if (!len) {
2677 break;
2678 }
2679
2680 l = len;
2681 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002682 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002683
2684 return result;
2685}
2686
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002687MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2688 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002689{
2690 hwaddr l;
2691 hwaddr addr1;
2692 MemoryRegion *mr;
2693 MemTxResult result = MEMTX_OK;
2694
2695 if (len > 0) {
2696 rcu_read_lock();
2697 l = len;
2698 mr = address_space_translate(as, addr, &addr1, &l, false);
2699 result = address_space_read_continue(as, addr, attrs, buf, len,
2700 addr1, l, mr);
2701 rcu_read_unlock();
2702 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002703
2704 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002705}
2706
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002707MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2708 uint8_t *buf, int len, bool is_write)
2709{
2710 if (is_write) {
2711 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2712 } else {
2713 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2714 }
2715}
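
/* Example (illustrative, not from the original file): a guest-physical read
 * with explicit transaction-error handling, assuming address_space_memory
 * has already been initialized by cpu_exec_init_all(). */
static bool example_read_guest(hwaddr addr, void *dest, int len)
{
    MemTxResult r = address_space_rw(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED,
                                     (uint8_t *)dest, len, false);
    return r == MEMTX_OK;
}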
Avi Kivityac1970f2012-10-03 16:22:53 +02002716
Avi Kivitya8170e52012-10-23 12:30:10 +02002717void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002718 int len, int is_write)
2719{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002720 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2721 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002722}
2723
Alexander Graf582b55a2013-12-11 14:17:44 +01002724enum write_rom_type {
2725 WRITE_DATA,
2726 FLUSH_CACHE,
2727};
2728
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002729static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002730 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002731{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002732 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002733 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002734 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002735 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002736
Paolo Bonzini41063e12015-03-18 14:21:43 +01002737 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002738 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002739 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002740 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002741
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002742 if (!(memory_region_is_ram(mr) ||
2743 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002744 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002745 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002746 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002747 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002748 switch (type) {
2749 case WRITE_DATA:
2750 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002751 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002752 break;
2753 case FLUSH_CACHE:
2754 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2755 break;
2756 }
bellardd0ecd2a2006-04-23 17:14:48 +00002757 }
2758 len -= l;
2759 buf += l;
2760 addr += l;
2761 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002762 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002763}
2764
Alexander Graf582b55a2013-12-11 14:17:44 +01002765/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002766void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002767 const uint8_t *buf, int len)
2768{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002769 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002770}
2771
2772void cpu_flush_icache_range(hwaddr start, int len)
2773{
2774 /*
2775 * This function should do the same thing as an icache flush that was
2776 * triggered from within the guest. For TCG we are always cache coherent,
2777 * so there is no need to flush anything. For KVM / Xen we need to flush
2778 * the host's instruction cache at least.
2779 */
2780 if (tcg_enabled()) {
2781 return;
2782 }
2783
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002784 cpu_physical_memory_write_rom_internal(&address_space_memory,
2785 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002786}
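
/* Sketch (assumed usage): patching guest code from the host side.  The ROM
 * write path bypasses read-only dispatch, and KVM/Xen additionally need the
 * host instruction cache flushed before the guest executes the new bytes. */
static void example_patch_code(hwaddr addr, const uint8_t *insn, int len)
{
    cpu_physical_memory_write_rom(&address_space_memory, addr, insn, len);
    cpu_flush_icache_range(addr, len);
}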
2787
aliguori6d16c2f2009-01-22 16:59:11 +00002788typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002789 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002790 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002791 hwaddr addr;
2792 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002793 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002794} BounceBuffer;
2795
2796static BounceBuffer bounce;
2797
aliguoriba223c22009-01-22 16:59:16 +00002798typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002799 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002800 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002801} MapClient;
2802
Fam Zheng38e047b2015-03-16 17:03:35 +08002803QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002804static QLIST_HEAD(map_client_list, MapClient) map_client_list
2805 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002806
Fam Zhenge95205e2015-03-16 17:03:37 +08002807static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002808{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002809 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002810 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002811}
2812
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002813static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002814{
2815 MapClient *client;
2816
Blue Swirl72cf2d42009-09-12 07:36:22 +00002817 while (!QLIST_EMPTY(&map_client_list)) {
2818 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002819 qemu_bh_schedule(client->bh);
2820 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002821 }
2822}
2823
Fam Zhenge95205e2015-03-16 17:03:37 +08002824void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002825{
2826 MapClient *client = g_malloc(sizeof(*client));
2827
Fam Zheng38e047b2015-03-16 17:03:35 +08002828 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002829 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002830 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002831 if (!atomic_read(&bounce.in_use)) {
2832 cpu_notify_map_clients_locked();
2833 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002834 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002835}
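
/* Sketch of the retry protocol (hypothetical names, assumed usage): when
 * address_space_map() returns NULL because the single bounce buffer is busy,
 * register a bottom half that retries once the buffer is released.  The BH
 * would be created elsewhere with qemu_bh_new(example_map_retry, NULL). */
static QEMUBH *example_retry_bh;

static void example_map_retry(void *opaque)
{
    hwaddr len = 4096;
    void *p = address_space_map(&address_space_memory, 0x1000, &len, true);

    if (!p) {
        /* Bounce buffer still busy: re-arm; the BH fires on unmap. */
        cpu_register_map_client(example_retry_bh);
        return;
    }
    /* ... use up to len bytes ... */
    address_space_unmap(&address_space_memory, p, len, true, len);
}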
2836
Fam Zheng38e047b2015-03-16 17:03:35 +08002837void cpu_exec_init_all(void)
2838{
2839 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002840 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002841 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002842 qemu_mutex_init(&map_client_list_lock);
2843}
2844
Fam Zhenge95205e2015-03-16 17:03:37 +08002845void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002846{
Fam Zhenge95205e2015-03-16 17:03:37 +08002847 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002848
Fam Zhenge95205e2015-03-16 17:03:37 +08002849 qemu_mutex_lock(&map_client_list_lock);
2850 QLIST_FOREACH(client, &map_client_list, link) {
2851 if (client->bh == bh) {
2852 cpu_unregister_map_client_do(client);
2853 break;
2854 }
2855 }
2856 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002857}
2858
2859static void cpu_notify_map_clients(void)
2860{
Fam Zheng38e047b2015-03-16 17:03:35 +08002861 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002862 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002863 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002864}
2865
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002866bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2867{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002868 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002869 hwaddr l, xlat;
2870
Paolo Bonzini41063e12015-03-18 14:21:43 +01002871 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002872 while (len > 0) {
2873 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002874 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2875 if (!memory_access_is_direct(mr, is_write)) {
2876 l = memory_access_size(mr, l, addr);
2877 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002878 return false;
2879 }
2880 }
2881
2882 len -= l;
2883 addr += l;
2884 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002885 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002886 return true;
2887}
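
/* Example (illustrative): probe a DMA window up front so a device model can
 * fail the whole transfer cleanly instead of producing a partial write. */
static bool example_dma_ok(AddressSpace *as, hwaddr addr, int len)
{
    return address_space_access_valid(as, addr, len, true);
}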
2888
aliguori6d16c2f2009-01-22 16:59:11 +00002889/* Map a physical memory region into a host virtual address.
2890 * May map a subset of the requested range, given by and returned in *plen.
2891 * May return NULL if resources needed to perform the mapping are exhausted.
2892 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002893 * Use cpu_register_map_client() to know when retrying the map operation is
2894 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002895 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002896void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002897 hwaddr addr,
2898 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002899 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002900{
Avi Kivitya8170e52012-10-23 12:30:10 +02002901 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002902 hwaddr done = 0;
2903 hwaddr l, xlat, base;
2904 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002905 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002906
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002907 if (len == 0) {
2908 return NULL;
2909 }
aliguori6d16c2f2009-01-22 16:59:11 +00002910
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002911 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002912 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002913 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002914
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002915 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002916 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002917 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002918 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002919 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002920 /* Avoid unbounded allocations */
2921 l = MIN(l, TARGET_PAGE_SIZE);
2922 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002923 bounce.addr = addr;
2924 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002925
2926 memory_region_ref(mr);
2927 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002928 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002929 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2930 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002931 }
aliguori6d16c2f2009-01-22 16:59:11 +00002932
Paolo Bonzini41063e12015-03-18 14:21:43 +01002933 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002934 *plen = l;
2935 return bounce.buffer;
2936 }
2937
2938 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002939
2940 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002941 len -= l;
2942 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002943 done += l;
2944 if (len == 0) {
2945 break;
2946 }
2947
2948 l = len;
2949 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2950 if (this_mr != mr || xlat != base + done) {
2951 break;
2952 }
aliguori6d16c2f2009-01-22 16:59:11 +00002953 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002954
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002955 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002956 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002957 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002958 rcu_read_unlock();
2959
2960 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002961}
2962
Avi Kivityac1970f2012-10-03 16:22:53 +02002963/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002964 * Will also mark the memory as dirty if is_write == 1. access_len gives
2965 * the amount of memory that was actually read or written by the caller.
2966 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002967void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2968 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002969{
2970 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002971 MemoryRegion *mr;
2972 ram_addr_t addr1;
2973
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002974 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002975 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002976 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002977 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002978 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002979 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002980 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002981 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002982 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002983 return;
2984 }
2985 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002986 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2987 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002988 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002989 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002990 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002991 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002992 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002993 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002994}
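
/* End-to-end sketch (assumed usage, not from the original file): zero-copy
 * write into guest memory.  The caller must honour the possibly shortened
 * *plen and report the bytes actually touched back to address_space_unmap(). */
static bool example_zero_fill(AddressSpace *as, hwaddr addr, hwaddr want)
{
    hwaddr len = want;
    void *p = address_space_map(as, addr, &len, true);

    if (!p) {
        return false;           /* busy or unmappable; see map-client API */
    }
    memset(p, 0, len);          /* len may be smaller than want */
    address_space_unmap(as, p, len, true, len);
    return len == want;
}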
bellardd0ecd2a2006-04-23 17:14:48 +00002995
Avi Kivitya8170e52012-10-23 12:30:10 +02002996void *cpu_physical_memory_map(hwaddr addr,
2997 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002998 int is_write)
2999{
3000 return address_space_map(&address_space_memory, addr, plen, is_write);
3001}
3002
Avi Kivitya8170e52012-10-23 12:30:10 +02003003void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3004 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003005{
3006 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3007}
3008
bellard8df1cd02005-01-28 22:37:22 +00003009/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003010static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3011 MemTxAttrs attrs,
3012 MemTxResult *result,
3013 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003014{
bellard8df1cd02005-01-28 22:37:22 +00003015 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003016 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003017 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003018 hwaddr l = 4;
3019 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003020 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003021 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003022
Paolo Bonzini41063e12015-03-18 14:21:43 +01003023 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003024 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003025 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003026 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003027
bellard8df1cd02005-01-28 22:37:22 +00003028 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003029 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003030#if defined(TARGET_WORDS_BIGENDIAN)
3031 if (endian == DEVICE_LITTLE_ENDIAN) {
3032 val = bswap32(val);
3033 }
3034#else
3035 if (endian == DEVICE_BIG_ENDIAN) {
3036 val = bswap32(val);
3037 }
3038#endif
bellard8df1cd02005-01-28 22:37:22 +00003039 } else {
3040 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003041 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003042 switch (endian) {
3043 case DEVICE_LITTLE_ENDIAN:
3044 val = ldl_le_p(ptr);
3045 break;
3046 case DEVICE_BIG_ENDIAN:
3047 val = ldl_be_p(ptr);
3048 break;
3049 default:
3050 val = ldl_p(ptr);
3051 break;
3052 }
Peter Maydell50013112015-04-26 16:49:24 +01003053 r = MEMTX_OK;
3054 }
3055 if (result) {
3056 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003057 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003058 if (release_lock) {
3059 qemu_mutex_unlock_iothread();
3060 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003061 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003062 return val;
3063}
3064
Peter Maydell50013112015-04-26 16:49:24 +01003065uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3066 MemTxAttrs attrs, MemTxResult *result)
3067{
3068 return address_space_ldl_internal(as, addr, attrs, result,
3069 DEVICE_NATIVE_ENDIAN);
3070}
3071
3072uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3073 MemTxAttrs attrs, MemTxResult *result)
3074{
3075 return address_space_ldl_internal(as, addr, attrs, result,
3076 DEVICE_LITTLE_ENDIAN);
3077}
3078
3079uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3080 MemTxAttrs attrs, MemTxResult *result)
3081{
3082 return address_space_ldl_internal(as, addr, attrs, result,
3083 DEVICE_BIG_ENDIAN);
3084}
3085
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003086uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003087{
Peter Maydell50013112015-04-26 16:49:24 +01003088 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003089}
3090
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003091uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003092{
Peter Maydell50013112015-04-26 16:49:24 +01003093 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003094}
3095
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003096uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003097{
Peter Maydell50013112015-04-26 16:49:24 +01003098 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003099}
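
/* Illustration (assumed usage): reading a little-endian device register in a
 * host-endianness-agnostic way; the _le/_be variants byte-swap as needed. */
static uint32_t example_read_reg32(AddressSpace *as, hwaddr base)
{
    MemTxResult r;
    uint32_t v = address_space_ldl_le(as, base, MEMTXATTRS_UNSPECIFIED, &r);

    return r == MEMTX_OK ? v : 0xffffffff;  /* all-ones on a bus error */
}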
3100
bellard84b7b8e2005-11-28 21:19:04 +00003101/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003102static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3103 MemTxAttrs attrs,
3104 MemTxResult *result,
3105 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003106{
bellard84b7b8e2005-11-28 21:19:04 +00003107 uint8_t *ptr;
3108 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003109 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003110 hwaddr l = 8;
3111 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003112 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003113 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003114
Paolo Bonzini41063e12015-03-18 14:21:43 +01003115 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003116 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003117 false);
3118 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003119 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003120
bellard84b7b8e2005-11-28 21:19:04 +00003121 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003122 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003123#if defined(TARGET_WORDS_BIGENDIAN)
3124 if (endian == DEVICE_LITTLE_ENDIAN) {
3125 val = bswap64(val);
3126 }
3127#else
3128 if (endian == DEVICE_BIG_ENDIAN) {
3129 val = bswap64(val);
3130 }
3131#endif
bellard84b7b8e2005-11-28 21:19:04 +00003132 } else {
3133 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003134 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003135 switch (endian) {
3136 case DEVICE_LITTLE_ENDIAN:
3137 val = ldq_le_p(ptr);
3138 break;
3139 case DEVICE_BIG_ENDIAN:
3140 val = ldq_be_p(ptr);
3141 break;
3142 default:
3143 val = ldq_p(ptr);
3144 break;
3145 }
Peter Maydell50013112015-04-26 16:49:24 +01003146 r = MEMTX_OK;
3147 }
3148 if (result) {
3149 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003150 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003151 if (release_lock) {
3152 qemu_mutex_unlock_iothread();
3153 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003154 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003155 return val;
3156}
3157
Peter Maydell50013112015-04-26 16:49:24 +01003158uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3159 MemTxAttrs attrs, MemTxResult *result)
3160{
3161 return address_space_ldq_internal(as, addr, attrs, result,
3162 DEVICE_NATIVE_ENDIAN);
3163}
3164
3165uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3166 MemTxAttrs attrs, MemTxResult *result)
3167{
3168 return address_space_ldq_internal(as, addr, attrs, result,
3169 DEVICE_LITTLE_ENDIAN);
3170}
3171
3172uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3173 MemTxAttrs attrs, MemTxResult *result)
3174{
3175 return address_space_ldq_internal(as, addr, attrs, result,
3176 DEVICE_BIG_ENDIAN);
3177}
3178
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003179uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003180{
Peter Maydell50013112015-04-26 16:49:24 +01003181 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003182}
3183
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003184uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003185{
Peter Maydell50013112015-04-26 16:49:24 +01003186 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003187}
3188
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003189uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003190{
Peter Maydell50013112015-04-26 16:49:24 +01003191 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003192}
3193
bellardaab33092005-10-30 20:48:42 +00003194/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003195uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3196 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003197{
3198 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003199 MemTxResult r;
3200
3201 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3202 if (result) {
3203 *result = r;
3204 }
bellardaab33092005-10-30 20:48:42 +00003205 return val;
3206}
3207
Peter Maydell50013112015-04-26 16:49:24 +01003208uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3209{
3210 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3211}
3212
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003213/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003214static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3215 hwaddr addr,
3216 MemTxAttrs attrs,
3217 MemTxResult *result,
3218 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003219{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003220 uint8_t *ptr;
3221 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003222 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003223 hwaddr l = 2;
3224 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003225 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003226 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003227
Paolo Bonzini41063e12015-03-18 14:21:43 +01003228 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003229 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003230 false);
3231 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003232 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003233
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003234 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003235 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003236#if defined(TARGET_WORDS_BIGENDIAN)
3237 if (endian == DEVICE_LITTLE_ENDIAN) {
3238 val = bswap16(val);
3239 }
3240#else
3241 if (endian == DEVICE_BIG_ENDIAN) {
3242 val = bswap16(val);
3243 }
3244#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003245 } else {
3246 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003247 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003248 switch (endian) {
3249 case DEVICE_LITTLE_ENDIAN:
3250 val = lduw_le_p(ptr);
3251 break;
3252 case DEVICE_BIG_ENDIAN:
3253 val = lduw_be_p(ptr);
3254 break;
3255 default:
3256 val = lduw_p(ptr);
3257 break;
3258 }
Peter Maydell50013112015-04-26 16:49:24 +01003259 r = MEMTX_OK;
3260 }
3261 if (result) {
3262 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003263 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003264 if (release_lock) {
3265 qemu_mutex_unlock_iothread();
3266 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003267 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003268 return val;
bellardaab33092005-10-30 20:48:42 +00003269}
3270
Peter Maydell50013112015-04-26 16:49:24 +01003271uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3272 MemTxAttrs attrs, MemTxResult *result)
3273{
3274 return address_space_lduw_internal(as, addr, attrs, result,
3275 DEVICE_NATIVE_ENDIAN);
3276}
3277
3278uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3279 MemTxAttrs attrs, MemTxResult *result)
3280{
3281 return address_space_lduw_internal(as, addr, attrs, result,
3282 DEVICE_LITTLE_ENDIAN);
3283}
3284
3285uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3286 MemTxAttrs attrs, MemTxResult *result)
3287{
3288 return address_space_lduw_internal(as, addr, attrs, result,
3289 DEVICE_BIG_ENDIAN);
3290}
3291
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003292uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003293{
Peter Maydell50013112015-04-26 16:49:24 +01003294 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003295}
3296
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003297uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003298{
Peter Maydell50013112015-04-26 16:49:24 +01003299 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003300}
3301
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003302uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003303{
Peter Maydell50013112015-04-26 16:49:24 +01003304 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003305}
3306
bellard8df1cd02005-01-28 22:37:22 +00003307/* warning: addr must be aligned. The RAM page is not marked as dirty
3308 and the code inside is not invalidated. It is useful if the dirty
3309 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003310void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3311 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003312{
bellard8df1cd02005-01-28 22:37:22 +00003313 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003314 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003315 hwaddr l = 4;
3316 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003317 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003318 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003319 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003320
Paolo Bonzini41063e12015-03-18 14:21:43 +01003321 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003322 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003323 true);
3324 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003325 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003326
Peter Maydell50013112015-04-26 16:49:24 +01003327 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003328 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003329 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003330 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003331
Paolo Bonzini845b6212015-03-23 11:45:53 +01003332 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3333 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003334 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3335 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003336 r = MEMTX_OK;
3337 }
3338 if (result) {
3339 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003340 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003341 if (release_lock) {
3342 qemu_mutex_unlock_iothread();
3343 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003344 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003345}
3346
Peter Maydell50013112015-04-26 16:49:24 +01003347void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3348{
3349 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3350}
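
/* Sketch (assumed usage): setting an accessed bit in a guest page-table
 * entry without dirtying the page, so dirty-bitmap scans keep tracking the
 * guest's own writes rather than this bookkeeping store.  The bit value is
 * hypothetical. */
static void example_mark_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                      uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* hypothetical A bit */);
}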
3351
bellard8df1cd02005-01-28 22:37:22 +00003352/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003353static inline void address_space_stl_internal(AddressSpace *as,
3354 hwaddr addr, uint32_t val,
3355 MemTxAttrs attrs,
3356 MemTxResult *result,
3357 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003358{
bellard8df1cd02005-01-28 22:37:22 +00003359 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003360 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003361 hwaddr l = 4;
3362 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003363 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003364 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003365
Paolo Bonzini41063e12015-03-18 14:21:43 +01003366 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003367 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003368 true);
3369 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003370 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003371
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003372#if defined(TARGET_WORDS_BIGENDIAN)
3373 if (endian == DEVICE_LITTLE_ENDIAN) {
3374 val = bswap32(val);
3375 }
3376#else
3377 if (endian == DEVICE_BIG_ENDIAN) {
3378 val = bswap32(val);
3379 }
3380#endif
Peter Maydell50013112015-04-26 16:49:24 +01003381 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003382 } else {
bellard8df1cd02005-01-28 22:37:22 +00003383 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003384 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003385 switch (endian) {
3386 case DEVICE_LITTLE_ENDIAN:
3387 stl_le_p(ptr, val);
3388 break;
3389 case DEVICE_BIG_ENDIAN:
3390 stl_be_p(ptr, val);
3391 break;
3392 default:
3393 stl_p(ptr, val);
3394 break;
3395 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003396 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003397 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003398 }
Peter Maydell50013112015-04-26 16:49:24 +01003399 if (result) {
3400 *result = r;
3401 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003402 if (release_lock) {
3403 qemu_mutex_unlock_iothread();
3404 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003405 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003406}
3407
3408void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3409 MemTxAttrs attrs, MemTxResult *result)
3410{
3411 address_space_stl_internal(as, addr, val, attrs, result,
3412 DEVICE_NATIVE_ENDIAN);
3413}
3414
3415void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3416 MemTxAttrs attrs, MemTxResult *result)
3417{
3418 address_space_stl_internal(as, addr, val, attrs, result,
3419 DEVICE_LITTLE_ENDIAN);
3420}
3421
3422void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3423 MemTxAttrs attrs, MemTxResult *result)
3424{
3425 address_space_stl_internal(as, addr, val, attrs, result,
3426 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003427}
3428
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003429void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003430{
Peter Maydell50013112015-04-26 16:49:24 +01003431 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003432}
3433
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003434void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003435{
Peter Maydell50013112015-04-26 16:49:24 +01003436 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003437}
3438
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003439void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003440{
Peter Maydell50013112015-04-26 16:49:24 +01003441 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003442}
3443
bellardaab33092005-10-30 20:48:42 +00003444/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003445void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3446 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003447{
3448 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003449 MemTxResult r;
3450
3451 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3452 if (result) {
3453 *result = r;
3454 }
3455}
3456
3457void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3458{
3459 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003460}
3461
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003462/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003463static inline void address_space_stw_internal(AddressSpace *as,
3464 hwaddr addr, uint32_t val,
3465 MemTxAttrs attrs,
3466 MemTxResult *result,
3467 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003468{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003469 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003470 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003471 hwaddr l = 2;
3472 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003473 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003474 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003475
Paolo Bonzini41063e12015-03-18 14:21:43 +01003476 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003477 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003478 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003479 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003480
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003481#if defined(TARGET_WORDS_BIGENDIAN)
3482 if (endian == DEVICE_LITTLE_ENDIAN) {
3483 val = bswap16(val);
3484 }
3485#else
3486 if (endian == DEVICE_BIG_ENDIAN) {
3487 val = bswap16(val);
3488 }
3489#endif
Peter Maydell50013112015-04-26 16:49:24 +01003490 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003491 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003492 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003493 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003494 switch (endian) {
3495 case DEVICE_LITTLE_ENDIAN:
3496 stw_le_p(ptr, val);
3497 break;
3498 case DEVICE_BIG_ENDIAN:
3499 stw_be_p(ptr, val);
3500 break;
3501 default:
3502 stw_p(ptr, val);
3503 break;
3504 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003505 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003506 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003507 }
Peter Maydell50013112015-04-26 16:49:24 +01003508 if (result) {
3509 *result = r;
3510 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003511 if (release_lock) {
3512 qemu_mutex_unlock_iothread();
3513 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003514 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003515}
3516
3517void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3518 MemTxAttrs attrs, MemTxResult *result)
3519{
3520 address_space_stw_internal(as, addr, val, attrs, result,
3521 DEVICE_NATIVE_ENDIAN);
3522}
3523
3524void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3525 MemTxAttrs attrs, MemTxResult *result)
3526{
3527 address_space_stw_internal(as, addr, val, attrs, result,
3528 DEVICE_LITTLE_ENDIAN);
3529}
3530
3531void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3532 MemTxAttrs attrs, MemTxResult *result)
3533{
3534 address_space_stw_internal(as, addr, val, attrs, result,
3535 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003536}
3537
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003538void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003539{
Peter Maydell50013112015-04-26 16:49:24 +01003540 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003541}
3542
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003543void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003544{
Peter Maydell50013112015-04-26 16:49:24 +01003545 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003546}
3547
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003548void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003549{
Peter Maydell50013112015-04-26 16:49:24 +01003550 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003551}
3552
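/*
 * Illustrative sketch, not part of exec.c: a device whose registers are
 * defined as little-endian regardless of the target should use the _le
 * variant, so the bytes land in guest memory in the device's order on both
 * big- and little-endian targets. "pci_as" and "ring_base" are hypothetical.
 */
static void example_store_fixed_endian(AddressSpace *pci_as, hwaddr ring_base)
{
    /* always stored little-endian, e.g. a PCI-style structure field */
    stw_le_phys(pci_as, ring_base, 0x1234);

    /* native-endian variant: byte order follows TARGET_WORDS_BIGENDIAN */
    stw_phys(pci_as, ring_base + 2, 0x5678);
}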
bellardaab33092005-10-30 20:48:42 +00003553/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003554void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3555 MemTxAttrs attrs, MemTxResult *result)
3556{
3557 MemTxResult r;
3558 val = tswap64(val);
3559 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3560 if (result) {
3561 *result = r;
3562 }
3563}
3564
3565void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3566 MemTxAttrs attrs, MemTxResult *result)
3567{
3568 MemTxResult r;
3569 val = cpu_to_le64(val);
3570 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3571 if (result) {
3572 *result = r;
3573 }
3574}

3575void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3576 MemTxAttrs attrs, MemTxResult *result)
3577{
3578 MemTxResult r;
3579 val = cpu_to_be64(val);
3580 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3581 if (result) {
3582 *result = r;
3583 }
3584}
3585
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003586void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003587{
Peter Maydell50013112015-04-26 16:49:24 +01003588 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003589}
3590
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003591void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003592{
Peter Maydell50013112015-04-26 16:49:24 +01003593 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003594}
3595
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003596void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003597{
Peter Maydell50013112015-04-26 16:49:24 +01003598 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003599}
3600
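/*
 * Illustrative sketch, not part of exec.c: as the XXX above notes, the
 * 64-bit stores are not yet optimized; they byte-swap the value and bounce
 * it through address_space_rw() instead of taking an aligned fast path like
 * the stw/stl helpers. The calling convention is identical. "sys_as" and
 * "desc_addr" are hypothetical placeholders.
 */
static void example_store_descriptor(AddressSpace *sys_as, hwaddr desc_addr,
                                     uint64_t host_pa)
{
    MemTxResult res;

    /* store a little-endian 64-bit descriptor pointer and check the result */
    address_space_stq_le(sys_as, desc_addr, host_pa,
                         MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        /* report or recover, as appropriate for the device */
    }
}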
aliguori5e2972f2009-03-28 17:51:36 +00003601/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003602int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003603 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003604{
3605 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003606 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003607 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003608
3609 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003610 int asidx;
3611 MemTxAttrs attrs;
3612
bellard13eb76e2004-01-24 15:23:36 +00003613 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003614 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3615 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003616 /* if no physical page mapped, return an error */
3617 if (phys_addr == -1) {
3618 return -1;
        }
3619 l = (page + TARGET_PAGE_SIZE) - addr;
3620 if (l > len)
3621 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003622 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003623 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003624 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3625 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003626 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003627 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3628 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003629 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003630 }
bellard13eb76e2004-01-24 15:23:36 +00003631 len -= l;
3632 buf += l;
3633 addr += l;
3634 }
3635 return 0;
3636}
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003637
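/*
 * Illustrative sketch, not part of exec.c: how a debug consumer such as a
 * gdbstub or monitor command would read guest virtual memory through the
 * page-by-page walk implemented above. "cpu" and "vaddr" are hypothetical
 * inputs.
 */
static int example_read_guest_u32(CPUState *cpu, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    /* is_write == 0 means read; -1 is returned if any page is unmapped */
    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;
    }
    *out = ldl_p(buf);  /* interpret the bytes in target endianness */
    return 0;
}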
3638/*
3639 * Allows code that needs to deal with migration bitmaps etc. to still be built
3640 * target-independent.
3641 */
3642size_t qemu_target_page_bits(void)
3643{
3644 return TARGET_PAGE_BITS;
3645}
3646
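/*
 * Illustrative sketch, not part of exec.c: target-independent migration code
 * can size a dirty bitmap without referring to TARGET_PAGE_SIZE directly.
 * "ram_bytes" is a hypothetical input.
 */
static size_t example_pages_for_bitmap(uint64_t ram_bytes)
{
    size_t page_bits = qemu_target_page_bits();

    /* round up to a whole number of target pages */
    return (ram_bytes + (1ULL << page_bits) - 1) >> page_bits;
}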
Paul Brooka68fe892010-03-01 00:08:59 +00003647#endif
bellard13eb76e2004-01-24 15:23:36 +00003648
Blue Swirl8e4a4242013-01-06 18:30:17 +00003649/*
3650 * A helper function for the _utterly broken_ virtio device model to find out if
3651 * it's running on a big-endian machine. Don't do this at home, kids!
3652 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003653bool target_words_bigendian(void);
3654bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003655{
3656#if defined(TARGET_WORDS_BIGENDIAN)
3657 return true;
3658#else
3659 return false;
3660#endif
3661}
3662
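/*
 * Illustrative sketch, not part of exec.c: legacy (pre-1.0) virtio devices
 * follow the guest's native byte order, which is why virtio needs the
 * helper above. The wrapper name below is hypothetical.
 */
static bool example_legacy_virtio_is_big_endian(void)
{
    return target_words_bigendian();
}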
Wen Congyang76f35532012-05-07 12:04:18 +08003663#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003664bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003665{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003666 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003667 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003668 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003669
Paolo Bonzini41063e12015-03-18 14:21:43 +01003670 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003671 mr = address_space_translate(&address_space_memory,
3672 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003673
Paolo Bonzini41063e12015-03-18 14:21:43 +01003674 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3675 rcu_read_unlock();
3676 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003677}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003678
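/*
 * Illustrative sketch, not part of exec.c: a guest-memory dump routine could
 * use the predicate above to skip device (MMIO) regions, where reading has
 * side effects. "paddr" is a hypothetical input.
 */
static bool example_safe_to_dump(hwaddr paddr)
{
    /* true only for RAM/ROMD regions, which are safe to read directly */
    return !cpu_physical_memory_is_io(paddr);
}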
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003679int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003680{
3681 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003682 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003683
Mike Day0dc3f442013-09-05 14:41:35 -04003684 rcu_read_lock();
3685 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003686 ret = func(block->idstr, block->host, block->offset,
3687 block->used_length, opaque);
3688 if (ret) {
3689 break;
3690 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003691 }
Mike Day0dc3f442013-09-05 14:41:35 -04003692 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003693 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003694}
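/*
 * Illustrative sketch, not part of exec.c: a RAMBlockIterFunc that tallies
 * the total used length of all RAM blocks. The parameter list matches the
 * call above: (idstr, host pointer, offset, used length, opaque).
 * Usage: uint64_t total = 0;
 *        qemu_ram_foreach_block(example_sum_block_lengths, &total);
 */
static int example_sum_block_lengths(const char *idstr, void *host_addr,
                                     ram_addr_t offset, ram_addr_t length,
                                     void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;  /* returning non-zero stops the iteration early */
}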
Peter Maydellec3f8c92013-06-27 20:53:38 +01003695#endif