/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels to skip to reach the next node (each level
     * resolves P_L2_BITS of the index); 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
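
/* Worked example of the geometry above (illustrative; the real values
 * depend on the target): with ADDR_SPACE_BITS == 64, 4 KiB target pages
 * (TARGET_PAGE_BITS == 12) and P_L2_BITS == 9, P_L2_LEVELS evaluates to
 * ((64 - 12 - 1) / 9) + 1 == 6, i.e. six levels of 512-entry nodes
 * resolving the 52-bit page frame number.
 */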

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
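
/* A note on the walk above: at each level one entry spans "step" pages.
 * When the remaining range starts on a step boundary and still covers a
 * whole step, the entry becomes a leaf for the entire step; otherwise the
 * walk recurses one level down.  A large, well-aligned range is therefore
 * represented by a few high-level leaves rather than one entry per page.
 */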

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
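
/* Illustration: if three consecutive levels each contain a single valid
 * child leading to one node, the chain collapses so that the topmost
 * entry points directly at that node with skip == 3, and phys_page_find()
 * below crosses all three levels in a single loop iteration.  The
 * (1 << 3) check above bounds the accumulated skip so it stays well
 * inside the 6-bit skip field.
 */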

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
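
/* mru_section acts as a single-entry cache in front of phys_page_find():
 * a cached section is reused only when it covers the incoming address,
 * and PHYS_SECTION_UNASSIGNED is deliberately never treated as a hit,
 * because that section spans the whole address space and would otherwise
 * match every later lookup.
 */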

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
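
/* Rough calling pattern (a sketch, not code quoted from a real caller):
 * the lookup must stay inside an RCU critical section so that the
 * returned MemoryRegion cannot be reclaimed while it is being used:
 *
 *     hwaddr xlat, len = size;
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *     ... access at most len bytes of mr starting at offset xlat ...
 *     rcu_read_unlock();
 */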

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
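
/* The two subsections above are only emitted on the migration stream when
 * their .needed callback returns true, which is what keeps the common CPU
 * state loadable by older QEMU versions that predate those fields.
 */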

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
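
/* Hypothetical usage sketch (not taken from this file): a target whose
 * CPU sees two buses might wire things up as
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, &address_space_memory, 0);
 *     cpu_address_space_init(cpu, my_secure_as, 1);
 *
 * after which cpu_get_address_space(cpu, 1) returns my_secure_as
 * (a made-up name for the example).
 */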

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
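
/* Note the asymmetry above: the softmmu allocator hands out the lowest
 * clear bit in cpu_index_map and cpu_exec_exit() clears it again, so an
 * index freed by CPU hot-unplug can be reused; the user-mode variant
 * simply counts the CPUs currently on the list and frees nothing.
 */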

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
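
/* Worked example with made-up numbers: a watchpoint at vaddr 0x1000 with
 * len 4 gives wpend == 0x1003; an access at addr 0x1002 with len 2 gives
 * addrend == 0x1003.  Neither addr > wpend nor wp->vaddr > addrend holds,
 * so the ranges overlap.  Comparing inclusive end addresses is what keeps
 * a range ending exactly at the top of the address space from wrapping
 * to zero.
 */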

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
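
/* The loop above walks the range in DIRTY_MEMORY_BLOCK_SIZE-page chunks:
 * a possibly partial chunk up to the first block boundary, then whole
 * blocks, then a partial tail, so that each bitmap_test_and_clear_atomic()
 * call stays within a single blocks->blocks[idx] bitmap.
 */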

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
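
/* The returned iotlb value is overloaded: for RAM it is the page-aligned
 * ram address with a small section number (NOTDIRTY or ROM) ORed into the
 * low bits, while for MMIO it is the index of the section within
 * d->map.sections plus the offset within the page.  phys_section_add()
 * below asserts sections_nb < TARGET_PAGE_SIZE precisely so that a
 * section number can never spill into the page-aligned part.
 */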
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
1171
1172
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001173static void register_multipage(AddressSpaceDispatch *d,
1174 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001175{
Avi Kivitya8170e52012-10-23 12:30:10 +02001176 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001177 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001178 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1179 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001180
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001181 assert(num_pages);
1182 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001183}
1184
Avi Kivityac1970f2012-10-03 16:22:53 +02001185static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001186{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001187 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001188 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001189 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001190 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001191
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001192 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1193 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1194 - now.offset_within_address_space;
1195
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001196 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001197 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001198 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001199 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001200 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001201 while (int128_ne(remain.size, now.size)) {
1202 remain.size = int128_sub(remain.size, now.size);
1203 remain.offset_within_address_space += int128_get64(now.size);
1204 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001205 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001206 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001207 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001208 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001209 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001210 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001211 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001212 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001213 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001214 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001215 }
1216}
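
/*
 * Worked example (illustrative): with 4 KiB target pages, a section
 * starting at guest physical 0x1800 with size 0x3000 is split into
 *   - a subpage entry for [0x1800, 0x2000), the unaligned head,
 *   - a multipage entry for [0x2000, 0x4000), the page-aligned middle,
 *   - a subpage entry for [0x4000, 0x4800), the unaligned tail.
 */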

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd = -1;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
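
/*
 * Illustrative example: with blocks occupying [0, 0x10000) and
 * [0x30000, 0x40000), find_ram_offset(0x10000) sees two candidate
 * gaps, [0x10000, 0x30000) and everything above 0x40000.  The
 * smallest gap that still fits wins, so the new block is placed at
 * offset 0x10000.
 */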

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
    RAMBlock *block;

    rcu_read_lock();

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block &&
            !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}
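
/*
 * Illustrative use (hedged): a board typically names its main RAM
 * block right after allocation, e.g.
 *
 *     qemu_ram_set_idstr(block, "pc.ram", NULL);
 *
 * so migration can match source and destination blocks by idstr.
 */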

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(RAMBlock *block)
{
    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
{
    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
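
/*
 * Illustrative call (assumes "block" was created with the
 * RAM_RESIZEABLE flag, e.g. via qemu_ram_alloc_resizeable()):
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block, new_len, &err) < 0) {
 *         error_report_err(err);
 *     }
 */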

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
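
/*
 * Worked example (illustrative): growing guest RAM from one to three
 * DIRTY_MEMORY_BLOCK_SIZE units copies the single old bitmap pointer
 * into a fresh DirtyMemoryBlocks array, allocates two new bitmaps,
 * publishes the array with atomic_rcu_set(), and lets RCU reclaim the
 * old container once concurrent readers are done; the bitmaps
 * themselves are shared, never copied.
 */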

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}

#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
#endif
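
/*
 * Illustrative call (Linux only; "/dev/hugepages" is just an example
 * mount point, and "mr" must already be initialized by the caller):
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(ram_size, mr, true,
 *                                             "/dev/hugepages", &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 */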

static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
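
/*
 * Illustrative sketch: a device that must react to resizes passes a
 * callback.  "my_device_resized" is a hypothetical name; it runs
 * whenever qemu_ram_resize() changes the block's used length.
 *
 *     static void my_device_resized(const char *id, uint64_t length,
 *                                   void *host)
 *     {
 *         // refresh any size registers the guest can read
 *     }
 *
 *     block = qemu_ram_alloc_resizeable(size, max_size,
 *                                       my_device_resized, mr, &err);
 */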

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void qemu_set_ram_fd(ram_addr_t addr, int fd)
{
    RAMBlock *block;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    block->fd = fd;
    rcu_read_unlock();
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead.  For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    ram_addr_t offset_inside_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}

/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
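
/*
 * Illustrative use: map a host pointer back to its RAMBlock and
 * guest-physical coordinates.
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, false,
 *                                             &ram_addr, &offset);
 *     if (rb) {
 *         // ram_addr == rb->offset + offset
 *     }
 */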

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
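
/*
 * Illustrative example (4 KiB pages assumed; section_a and section_b
 * are hypothetical section indices): for a page whose first half is
 * covered by section A and second half by section B, registration
 * ends up as
 *
 *     subpage_register(mmio, 0x000, 0x7ff, section_a);
 *     subpage_register(mmio, 0x800, 0xfff, section_b);
 *
 * so sub_section[] maps every byte offset in the page to the section
 * that handles it.
 */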
2283
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002284static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002285{
Anthony Liguoric227f092009-10-01 16:12:16 -05002286 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002287
Anthony Liguori7267c092011-08-20 22:09:37 -05002288 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002289
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002290 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002291 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002292 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002293 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002294 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002295#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002296 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2297 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002298#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002299 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002300
2301 return mmio;
2302}
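
/*
 * Illustrative sketch (not part of the build): when a target page is split
 * between two sections - say RAM in its first half and device MMIO in the
 * second - the page is backed by one subpage, and the flat view builder in
 * effect does:
 *
 *     subpage_t *sub = subpage_init(as, page_base);
 *     subpage_register(sub, 0x000, 0x7ff, ram_section_idx);
 *     subpage_register(sub, 0x800, 0xfff, mmio_section_idx);
 *
 * page_base and the section indexes are placeholders; each byte offset in
 * the page then dispatches to the section registered for it.
 */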

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
2328
Avi Kivitye9179ce2009-06-14 11:38:52 +03002329static void io_mem_init(void)
2330{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002331 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002332 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002333 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002334 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002335 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002336 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002337 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002338}
2339
Avi Kivityac1970f2012-10-03 16:22:53 +02002340static void mem_begin(MemoryListener *listener)
2341{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002342 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002343 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2344 uint16_t n;
2345
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002346 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002347 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002348 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002349 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002350 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002351 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002352 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002353 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002354
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002355 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002356 d->as = as;
2357 as->next_dispatch = d;
2358}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* Since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
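
/*
 * Example (illustrative): board code builds the guest memory layout by
 * attaching subregions under the region returned above, e.g.
 *
 *     memory_region_add_subregion(get_system_memory(), 0, machine_ram);
 *
 * where machine_ram stands in for a MemoryRegion the board initialized
 * elsewhere.
 */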

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
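
/*
 * Worked example (illustrative) of the clamping above: for an access of
 * l == 8 at addr == 0x1006 on a region without impl.unaligned, the low-bit
 * trick gives addr & -addr == 2, so the access is bounded to 2 bytes;
 * pow2floor() then leaves l == 2, and the caller must issue the remaining
 * bytes as further accesses.
 */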

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
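
/*
 * Typical caller pattern (sketch): take the BQL only when the region
 * requires it, dispatch, then drop the lock only if this call took it.
 *
 *     bool release_lock = prepare_mmio_access(mr);
 *     memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */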

/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
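
/*
 * Example (illustrative): a DMA-capable device model writing a buffer into
 * guest memory and checking the transaction result.  The address 0x1000 is
 * a placeholder.
 *
 *     uint8_t data[4] = { 1, 2, 3, 4 };
 *     MemTxResult r = address_space_rw(&address_space_memory, 0x1000,
 *                                      MEMTXATTRS_UNSPECIFIED, data,
 *                                      sizeof(data), true);
 *     if (r != MEMTX_OK) {
 *         // at least one access was rejected or faulted
 *     }
 */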

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
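
/*
 * Example (illustrative): a loader copying a firmware image into a region
 * that may be ROM, which a plain address_space_write() would refuse:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, rom_base,
 *                                   blob, blob_size);
 *
 * rom_base, blob and blob_size stand in for the caller's data.
 */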

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
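
/*
 * Example (illustrative): after patching guest code bytes in place, keep
 * the host instruction cache coherent for accelerators that execute them
 * directly.  patch_addr and patch_len are placeholders.
 *
 *     cpu_flush_icache_range(patch_addr, patch_len);
 */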

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
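
/*
 * Sketch of the retry protocol (illustrative): a device whose
 * address_space_map() call failed because the bounce buffer was busy can
 * register a bottom half to restart the transfer once mapping may succeed
 * again.  retry_dma and dev are placeholders for the caller's callback and
 * state.
 *
 *     static void retry_dma(void *opaque) { ... restart the transfer ... }
 *
 *     QEMUBH *bh = qemu_bh_new(retry_dma, dev);
 *     cpu_register_map_client(bh);
 */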

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* Don't leak the RCU critical section on the early
                 * failure path. */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
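
/*
 * Example (illustrative): probing a DMA descriptor before committing to a
 * transfer, so a bad guest address raises an error instead of a partial
 * access.  desc_addr and desc are placeholders.
 *
 *     if (!address_space_access_valid(as, desc_addr, sizeof(desc), false)) {
 *         // report a bus error rather than touching the region
 *     }
 */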

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
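
/*
 * Example (illustrative): the usual map/access/unmap sequence for a
 * zero-copy transfer, falling back when only part of the range was mapped.
 * size here is the caller's requested length.
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, addr, &plen, true);
 *     if (p) {
 *         memset(p, 0, plen);                // device fills the buffer
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 *     // if p is NULL or plen < size, fall back to address_space_rw()
 */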

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
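
/*
 * Illustrative endianness check: for RAM containing the bytes
 * 0x78 0x56 0x34 0x12 at address A (lowest address first),
 *
 *     ldl_le_phys(as, A) == 0x12345678
 *     ldl_be_phys(as, A) == 0x78563412
 *
 * while ldl_phys() returns whichever of the two matches the target's
 * native byte order.
 */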

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
3359
bellard8df1cd02005-01-28 22:37:22 +00003360/* warning: addr must be aligned. The ram page is not masked as dirty
3361 and the code inside is not invalidated. It is useful if the dirty
3362 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003363void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3364 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003365{
bellard8df1cd02005-01-28 22:37:22 +00003366 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003367 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003368 hwaddr l = 4;
3369 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003370 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003371 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003372 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003373
Paolo Bonzini41063e12015-03-18 14:21:43 +01003374 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003375 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003376 true);
3377 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003378 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003379
Peter Maydell50013112015-04-26 16:49:24 +01003380 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003381 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003382 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003383 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003384 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003385
Paolo Bonzini845b6212015-03-23 11:45:53 +01003386 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3387 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003388 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003389 r = MEMTX_OK;
3390 }
3391 if (result) {
3392 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003393 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003394 if (release_lock) {
3395 qemu_mutex_unlock_iothread();
3396 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003397 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003398}
3399
Peter Maydell50013112015-04-26 16:49:24 +01003400void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3401{
3402 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3403}
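
/*
 * Sketch of the intended use (illustrative): a TCG helper setting an
 * accessed bit in a guest page table entry writes through
 * stl_phys_notdirty() so the update neither marks the page dirty for
 * migration nor invalidates translated code sharing the page.  pte_addr
 * and PG_ACCESSED_MASK stand in for the caller's values.
 *
 *     stl_phys_notdirty(cs->as, pte_addr, pte | PG_ACCESSED_MASK);
 */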

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
3515
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

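/*
 * Example (illustrative; "as", "addr" and "val" stand for caller-supplied
 * values): checking the transaction result of a 16-bit store instead of
 * discarding it.
 *
 *     MemTxResult r;
 *     address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         -- handle a decode error or device fault here
 *     }
 */
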
/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

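/*
 * Note: unlike the stl/stw helpers above, the stq helpers byte-swap the
 * value in place (tswap64 / cpu_to_le64 / cpu_to_be64) and then issue a
 * plain 8-byte address_space_rw rather than dispatching through
 * memory_region_dispatch_write -- hence the "XXX: optimize" marker.
 */
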
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

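/*
 * Example (illustrative; "desc_gpa" and "buf_gpa" are hypothetical guest
 * physical addresses): a device model publishing a 64-bit pointer in a
 * guest-visible little-endian structure.
 *
 *     stq_le_phys(&address_space_memory, desc_gpa, buf_gpa);
 */
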
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
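
/*
 * Example (a sketch; "cpu" and "pc" stand for a vCPU and a guest virtual
 * address supplied by the caller): how a debugger front end might fetch an
 * instruction through the per-CPU debug path above.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         -- page not mapped; report an error to the debugger
 *     }
 */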

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
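
/*
 * Example (illustrative; "ram_bytes" is a hypothetical byte count):
 * deriving the target page size in target-independent code.
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     uint64_t nr_pages = ram_bytes / page_size;
 */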

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

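/*
 * Example (illustrative; "val" is a hypothetical 16-bit field in guest
 * byte order): the kind of fixup a legacy virtio device model performs
 * with the helper above.
 *
 *     if (target_words_bigendian()) {
 *         val = bswap16(val);
 *     }
 */
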
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

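/*
 * Example (illustrative; "paddr" is a hypothetical guest physical
 * address): skipping device-backed regions when walking guest memory.
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;   -- MMIO, not RAM or ROMD; nothing to dump
 *     }
 */
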
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
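
/*
 * Example (a sketch; the callback name is hypothetical and its signature
 * assumes the RAMBlockIterFunc typedef matches the call above): summing
 * the used length of every RAM block.
 *
 *     static int count_ram_cb(const char *idstr, uint8_t *host_addr,
 *                             ram_addr_t offset, ram_addr_t length,
 *                             void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;   -- returning non-zero stops the iteration early
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram_cb, &total);
 */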
#endif