/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf.
     */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

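/* Grow @map's node array so that at least @nodes more nodes fit; the
 * capacity doubles each time, so repeated reservations stay cheap.
 */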
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

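/* Take one node from the reserved pool and initialize all of its P_L2_SIZE
 * entries: leaves start out pointing at the unassigned section, inner nodes
 * at PHYS_MAP_NODE_NIL.  Returns the index of the new node.
 */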
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

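/* Fill the radix tree from @level downwards, mapping *nb pages starting at
 * *index to section @leaf.  Ranges that are aligned and fully covered at
 * this level are written in place; anything else recurses one level down,
 * allocating intermediate nodes on demand.
 */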
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

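/* Map @nb pages starting at @index in @d's phys_map to section @leaf. */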
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  If the entry has a single child,
 * update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

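/* Compact the whole tree: fold chains of single-child nodes into their
 * parent's skip count so lookups can jump over them.
 */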
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

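/* Walk the radix tree rooted at @lp and return the section covering @addr,
 * or the unassigned section if no section does.  The skip counts let the
 * walk jump over compacted levels.
 */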
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
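/* Look up the section for @addr, trying the MRU cache before falling back
 * to a full tree walk; optionally resolve subpage containers down to the
 * per-chunk section registered for @addr.
 */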
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
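/* Translate until the access leaves IOMMU regions: each iteration maps
 * @addr through one IOMMU and continues in that IOMMU's target address
 * space, clamping *plen to the translated range and falling back to
 * io_mem_unassigned if the IOMMU denies permission.
 */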
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

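/* Return the CPU with the given index, or NULL if there is none. */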
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

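/* Reserve and return the lowest free slot in cpu_index_map; set @errp if
 * all MAX_CPUMASK_BITS indexes are already in use.
 */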
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
    return;
}
#endif

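/* Undo cpu_exec_init(): unlink @cpu from the global list and release its
 * index.  A cpu_index of -1 means the index was never allocated or has
 * already been freed, so the function is safe to call more than once.
 */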
void cpu_exec_exit(CPUState *cpu)
{
#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
}

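/* Common CPU initialization: allocate a cpu_index, link the CPU into the
 * global list and, in softmmu builds, expose the "memory" link property
 * and register the common vmstate.
 */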
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

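/* Called after dirty bits for a RAM range have been cleared: reset the
 * TLB entries of every CPU over that range so that subsequent writes are
 * trapped and mark it dirty again.  The range must lie within a single
 * RAMBlock.
 */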
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
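/* Atomically test and clear the dirty bitmap bits of @client over the
 * range, returning true if any page in it was dirty.  Under TCG, ranges
 * that were dirty also get their TLB write traps re-armed.
 */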
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

1105/*
1106 * Set a custom physical guest memory alloator.
1107 * Accelerators with unusual needs may need this. Hopefully, we can
1108 * get rid of it eventually.
1109 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001157static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001158{
1159 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001160 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001161 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001162 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001163 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001164 MemoryRegionSection subsection = {
1165 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001166 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001167 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001168 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001169
Avi Kivityf3705d52012-03-08 16:16:34 +02001170 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001171
Avi Kivityf3705d52012-03-08 16:16:34 +02001172 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001173 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001174 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001175 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001176 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001177 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001178 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001179 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001180 }
1181 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001182 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001183 subpage_register(subpage, start, end,
1184 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001185}
1186
1187
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001188static void register_multipage(AddressSpaceDispatch *d,
1189 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001190{
Avi Kivitya8170e52012-10-23 12:30:10 +02001191 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001192 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001193 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1194 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001195
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001196 assert(num_pages);
1197 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001198}
1199
Avi Kivityac1970f2012-10-03 16:22:53 +02001200static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001201{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001202 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001203 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001204 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001205 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001206
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001207 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1208 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1209 - now.offset_within_address_space;
1210
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001211 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001212 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001213 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001214 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001215 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001216 while (int128_ne(remain.size, now.size)) {
1217 remain.size = int128_sub(remain.size, now.size);
1218 remain.offset_within_address_space += int128_get64(now.size);
1219 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001220 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001221 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001222 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001223 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001224 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001225 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001226 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001227 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001228 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001229 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001230 }
1231}
1232
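/*
 * Worked example (sketch, 4 KiB pages): a section covering
 * [0x1800, 0x4800) is registered in three steps by the loop above:
 *
 *     [0x1800, 0x2000)  unaligned head  -> register_subpage()
 *     [0x2000, 0x4000)  whole pages     -> register_multipage()
 *     [0x4000, 0x4800)  unaligned tail  -> register_subpage()
 */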
Sheng Yang62a27442010-01-26 19:21:16 +08001233void qemu_flush_coalesced_mmio_buffer(void)
1234{
1235 if (kvm_enabled())
1236 kvm_flush_coalesced_mmio_buffer();
1237}
1238
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001239void qemu_mutex_lock_ramlist(void)
1240{
1241 qemu_mutex_lock(&ram_list.mutex);
1242}
1243
1244void qemu_mutex_unlock_ramlist(void)
1245{
1246 qemu_mutex_unlock(&ram_list.mutex);
1247}
1248
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001249#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001250static void *file_ram_alloc(RAMBlock *block,
1251 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001252 const char *path,
1253 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001254{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001255 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001256 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001257 char *sanitized_name;
1258 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001259 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001260 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001261 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001262
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001263 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1264 error_setg(errp,
1265 "host lacks kvm mmu notifiers, -mem-path unsupported");
1266 return NULL;
1267 }
1268
1269 for (;;) {
1270 fd = open(path, O_RDWR);
1271 if (fd >= 0) {
1272 /* @path names an existing file, use it */
1273 break;
1274 }
1275 if (errno == ENOENT) {
1276 /* @path names a file that doesn't exist, create it */
1277 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1278 if (fd >= 0) {
1279 unlink_on_error = true;
1280 break;
1281 }
1282 } else if (errno == EISDIR) {
1283 /* @path names a directory, create a file there */
1284 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1285 sanitized_name = g_strdup(memory_region_name(block->mr));
1286 for (c = sanitized_name; *c != '\0'; c++) {
1287 if (*c == '/') {
1288 *c = '_';
1289 }
1290 }
1291
1292 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1293 sanitized_name);
1294 g_free(sanitized_name);
1295
1296 fd = mkstemp(filename);
1297 if (fd >= 0) {
1298 unlink(filename);
1299 g_free(filename);
1300 break;
1301 }
1302 g_free(filename);
1303 }
1304 if (errno != EEXIST && errno != EINTR) {
1305 error_setg_errno(errp, errno,
1306 "can't open backing store %s for guest RAM",
1307 path);
1308 goto error;
1309 }
1310 /*
1311 * Try again on EINTR and EEXIST. The latter happens when
 1312 * something else creates the file between our two open() calls.
1313 */
1314 }
1315
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001316 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001317 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001318
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001319 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001320 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001321 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001322 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001323 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001324 }
1325
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001326 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001327
1328 /*
1329 * ftruncate is not supported by hugetlbfs in older
1330 * hosts, so don't bother bailing out on errors.
1331 * If anything goes wrong with it under other filesystems,
1332 * mmap will fail.
1333 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001334 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001335 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001336 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001337
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001338 area = qemu_ram_mmap(fd, memory, block->mr->align,
1339 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001340 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001341 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001342 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001343 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001344 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001345
1346 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001347 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001348 }
1349
Alex Williamson04b16652010-07-02 11:13:17 -06001350 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001351 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001352
1353error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001354 if (unlink_on_error) {
1355 unlink(path);
1356 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001357 if (fd != -1) {
1358 close(fd);
1359 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001360 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001361}
1362#endif
1363
Mike Day0dc3f442013-09-05 14:41:35 -04001364/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001365static ram_addr_t find_ram_offset(ram_addr_t size)
1366{
Alex Williamson04b16652010-07-02 11:13:17 -06001367 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001368 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001369
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001370 assert(size != 0); /* it would hand out same offset multiple times */
1371
Mike Day0dc3f442013-09-05 14:41:35 -04001372 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001373 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001374 }
Alex Williamson04b16652010-07-02 11:13:17 -06001375
Mike Day0dc3f442013-09-05 14:41:35 -04001376 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001377 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001378
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001379 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001380
Mike Day0dc3f442013-09-05 14:41:35 -04001381 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001382 if (next_block->offset >= end) {
1383 next = MIN(next, next_block->offset);
1384 }
1385 }
1386 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001387 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001388 mingap = next - end;
1389 }
1390 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001391
1392 if (offset == RAM_ADDR_MAX) {
1393 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1394 (uint64_t)size);
1395 abort();
1396 }
1397
Alex Williamson04b16652010-07-02 11:13:17 -06001398 return offset;
1399}
1400
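/*
 * Worked example (sketch): with blocks at [0x0, 0x1000) and
 * [0x3000, 0x4000), find_ram_offset(0x1000) returns 0x1000, the end of
 * the first block, since the 0x2000-byte gap there is the smallest gap
 * that still fits the request.
 */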
Juan Quintela652d7ec2012-07-20 10:37:54 +02001401ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001402{
Alex Williamsond17b5282010-06-25 11:08:38 -06001403 RAMBlock *block;
1404 ram_addr_t last = 0;
1405
Mike Day0dc3f442013-09-05 14:41:35 -04001406 rcu_read_lock();
1407 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001408 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001409 }
Mike Day0dc3f442013-09-05 14:41:35 -04001410 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001411 return last;
1412}
1413
Jason Baronddb97f12012-08-02 15:44:16 -04001414static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1415{
1416 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001417
1418 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001419 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001420 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1421 if (ret) {
1422 perror("qemu_madvise");
1423 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1424 "but dump_guest_core=off specified\n");
1425 }
1426 }
1427}
1428
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001429const char *qemu_ram_get_idstr(RAMBlock *rb)
1430{
1431 return rb->idstr;
1432}
1433
Mike Dayae3a7042013-09-05 14:41:35 -04001434/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001435void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001436{
Gongleifa53a0e2016-05-10 10:04:59 +08001437 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001438
Avi Kivityc5705a72011-12-20 15:59:12 +02001439 assert(new_block);
1440 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001441
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001442 if (dev) {
1443 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001444 if (id) {
1445 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001446 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001447 }
1448 }
1449 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1450
Gongleiab0a9952016-05-10 10:05:00 +08001451 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001452 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001453 if (block != new_block &&
1454 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001455 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1456 new_block->idstr);
1457 abort();
1458 }
1459 }
Mike Day0dc3f442013-09-05 14:41:35 -04001460 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001461}
1462
Mike Dayae3a7042013-09-05 14:41:35 -04001463/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001464void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001465{
Mike Dayae3a7042013-09-05 14:41:35 -04001466 /* FIXME: arch_init.c assumes that this is not called throughout
1467 * migration. Ignore the problem since hot-unplug during migration
1468 * does not work anyway.
1469 */
Hu Tao20cfe882014-04-02 15:13:26 +08001470 if (block) {
1471 memset(block->idstr, 0, sizeof(block->idstr));
1472 }
1473}
1474
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001475static int memory_try_enable_merging(void *addr, size_t len)
1476{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001477 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001478 /* disabled by the user */
1479 return 0;
1480 }
1481
1482 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1483}
1484
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001485/* Only legal before the guest might have detected the memory size: e.g. on
 1486 * incoming migration, or right after reset.
 1487 *
 1488 * As the memory core doesn't know how the memory is accessed, it is up to
 1489 * the resize callback to update device state and/or add assertions to
 1490 * detect misuse, if necessary.
1491 */
Gongleifa53a0e2016-05-10 10:04:59 +08001492int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001493{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001494 assert(block);
1495
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001496 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001497
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001498 if (block->used_length == newsize) {
1499 return 0;
1500 }
1501
1502 if (!(block->flags & RAM_RESIZEABLE)) {
1503 error_setg_errno(errp, EINVAL,
1504 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1505 " in != 0x" RAM_ADDR_FMT, block->idstr,
1506 newsize, block->used_length);
1507 return -EINVAL;
1508 }
1509
1510 if (block->max_length < newsize) {
1511 error_setg_errno(errp, EINVAL,
1512 "Length too large: %s: 0x" RAM_ADDR_FMT
1513 " > 0x" RAM_ADDR_FMT, block->idstr,
1514 newsize, block->max_length);
1515 return -EINVAL;
1516 }
1517
1518 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1519 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001520 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1521 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001522 memory_region_set_size(block->mr, newsize);
1523 if (block->resized) {
1524 block->resized(block->idstr, newsize, block->host);
1525 }
1526 return 0;
1527}
1528
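/*
 * Usage sketch (hypothetical caller): a device model that learns the
 * real size on incoming migration could do
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block, newsize, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * The block must have been created with RAM_RESIZEABLE set, i.e. via
 * qemu_ram_alloc_resizeable(), or the call fails with -EINVAL.
 */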
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001529/* Called with ram_list.mutex held */
1530static void dirty_memory_extend(ram_addr_t old_ram_size,
1531 ram_addr_t new_ram_size)
1532{
1533 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1534 DIRTY_MEMORY_BLOCK_SIZE);
1535 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1536 DIRTY_MEMORY_BLOCK_SIZE);
1537 int i;
1538
1539 /* Only need to extend if block count increased */
1540 if (new_num_blocks <= old_num_blocks) {
1541 return;
1542 }
1543
1544 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1545 DirtyMemoryBlocks *old_blocks;
1546 DirtyMemoryBlocks *new_blocks;
1547 int j;
1548
1549 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1550 new_blocks = g_malloc(sizeof(*new_blocks) +
1551 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1552
1553 if (old_num_blocks) {
1554 memcpy(new_blocks->blocks, old_blocks->blocks,
1555 old_num_blocks * sizeof(old_blocks->blocks[0]));
1556 }
1557
1558 for (j = old_num_blocks; j < new_num_blocks; j++) {
1559 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1560 }
1561
1562 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1563
1564 if (old_blocks) {
1565 g_free_rcu(old_blocks, rcu);
1566 }
1567 }
1568}
1569
Fam Zheng528f46a2016-03-01 14:18:18 +08001570static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001571{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001572 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001573 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001574 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001575 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001576
1577 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001578
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001579 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001580 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581
1582 if (!new_block->host) {
1583 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001584 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001585 new_block->mr, &err);
1586 if (err) {
1587 error_propagate(errp, err);
1588 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001589 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001590 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001591 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001592 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001593 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001594 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001595 error_setg_errno(errp, errno,
1596 "cannot set up guest memory '%s'",
1597 memory_region_name(new_block->mr));
1598 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001599 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001600 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001601 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001602 }
1603 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001604
Li Zhijiandd631692015-07-02 20:18:06 +08001605 new_ram_size = MAX(old_ram_size,
1606 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1607 if (new_ram_size > old_ram_size) {
1608 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001609 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001610 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001611 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1612 * QLIST (which has an RCU-friendly variant) does not have insertion at
1613 * tail, so save the last element in last_block.
1614 */
Mike Day0dc3f442013-09-05 14:41:35 -04001615 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001616 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001617 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001618 break;
1619 }
1620 }
1621 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001622 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001623 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001624 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001625 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001626 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001627 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001628 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001629
Mike Day0dc3f442013-09-05 14:41:35 -04001630 /* Write list before version */
1631 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001632 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001633 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001634
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001635 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001636 new_block->used_length,
1637 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001638
Paolo Bonzinia904c912015-01-21 16:18:35 +01001639 if (new_block->host) {
1640 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1641 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1642 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1643 if (kvm_enabled()) {
1644 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1645 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001646 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001647}
1648
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001649#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001650RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1651 bool share, const char *mem_path,
1652 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001653{
1654 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001655 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001656
1657 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001658 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001659 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001660 }
1661
1662 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1663 /*
1664 * file_ram_alloc() needs to allocate just like
1665 * phys_mem_alloc, but we haven't bothered to provide
1666 * a hook there.
1667 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001668 error_setg(errp,
1669 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001670 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001671 }
1672
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001673 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001674 new_block = g_malloc0(sizeof(*new_block));
1675 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001676 new_block->used_length = size;
1677 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001678 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001679 new_block->host = file_ram_alloc(new_block, size,
1680 mem_path, errp);
1681 if (!new_block->host) {
1682 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001683 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001684 }
1685
Fam Zheng528f46a2016-03-01 14:18:18 +08001686 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001687 if (local_err) {
1688 g_free(new_block);
1689 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001690 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001691 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001692 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001693}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001694#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001695
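/*
 * Usage sketch (hypothetical caller, Linux-only path above): backing a
 * region with hugetlbfs might look like
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, true,
 *                                             "/dev/hugepages", &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 *
 * "/dev/hugepages" is an assumed mount point, not something this file
 * mandates.
 */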
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001696static
Fam Zheng528f46a2016-03-01 14:18:18 +08001697RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1698 void (*resized)(const char*,
1699 uint64_t length,
1700 void *host),
1701 void *host, bool resizeable,
1702 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001703{
1704 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001705 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001706
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001707 size = HOST_PAGE_ALIGN(size);
1708 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001709 new_block = g_malloc0(sizeof(*new_block));
1710 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001711 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001712 new_block->used_length = size;
1713 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001714 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001715 new_block->fd = -1;
1716 new_block->host = host;
1717 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001718 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001719 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001720 if (resizeable) {
1721 new_block->flags |= RAM_RESIZEABLE;
1722 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001723 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001724 if (local_err) {
1725 g_free(new_block);
1726 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001727 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001728 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001729 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001730}
1731
Fam Zheng528f46a2016-03-01 14:18:18 +08001732RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001733 MemoryRegion *mr, Error **errp)
1734{
1735 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1736}
1737
Fam Zheng528f46a2016-03-01 14:18:18 +08001738RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001739{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001740 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1741}
1742
Fam Zheng528f46a2016-03-01 14:18:18 +08001743RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001744 void (*resized)(const char*,
1745 uint64_t length,
1746 void *host),
1747 MemoryRegion *mr, Error **errp)
1748{
1749 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001750}
bellarde9a1ab12007-02-08 23:08:38 +00001751
Paolo Bonzini43771532013-09-09 17:58:40 +02001752static void reclaim_ramblock(RAMBlock *block)
1753{
1754 if (block->flags & RAM_PREALLOC) {
1755 ;
1756 } else if (xen_enabled()) {
1757 xen_invalidate_map_cache_entry(block->host);
1758#ifndef _WIN32
1759 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001760 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001761 close(block->fd);
1762#endif
1763 } else {
1764 qemu_anon_ram_free(block->host, block->max_length);
1765 }
1766 g_free(block);
1767}
1768
Fam Zhengf1060c52016-03-01 14:18:22 +08001769void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001770{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001771 if (!block) {
1772 return;
1773 }
1774
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001775 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001776 QLIST_REMOVE_RCU(block, next);
1777 ram_list.mru_block = NULL;
1778 /* Write list before version */
1779 smp_wmb();
1780 ram_list.version++;
1781 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001782 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001783}
1784
Huang Yingcd19cfa2011-03-02 08:56:19 +01001785#ifndef _WIN32
1786void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1787{
1788 RAMBlock *block;
1789 ram_addr_t offset;
1790 int flags;
1791 void *area, *vaddr;
1792
Mike Day0dc3f442013-09-05 14:41:35 -04001793 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001794 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001795 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001796 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001797 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001798 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001799 } else if (xen_enabled()) {
1800 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001801 } else {
1802 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001803 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001804 flags |= (block->flags & RAM_SHARED ?
1805 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001806 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1807 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001808 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001809 /*
1810 * Remap needs to match alloc. Accelerators that
1811 * set phys_mem_alloc never remap. If they did,
1812 * we'd need a remap hook here.
1813 */
1814 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1815
Huang Yingcd19cfa2011-03-02 08:56:19 +01001816 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1817 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1818 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001819 }
1820 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001821 fprintf(stderr, "Could not remap addr: "
1822 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001823 length, addr);
1824 exit(1);
1825 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001826 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001827 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001828 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001829 }
1830 }
1831}
1832#endif /* !_WIN32 */
1833
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001834int qemu_get_ram_fd(ram_addr_t addr)
1835{
Mike Dayae3a7042013-09-05 14:41:35 -04001836 RAMBlock *block;
1837 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001838
Mike Day0dc3f442013-09-05 14:41:35 -04001839 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001840 block = qemu_get_ram_block(addr);
1841 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001842 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001843 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001844}
1845
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001846void qemu_set_ram_fd(ram_addr_t addr, int fd)
1847{
1848 RAMBlock *block;
1849
1850 rcu_read_lock();
1851 block = qemu_get_ram_block(addr);
1852 block->fd = fd;
1853 rcu_read_unlock();
1854}
1855
Damjan Marion3fd74b82014-06-26 23:01:32 +02001856void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1857{
Mike Dayae3a7042013-09-05 14:41:35 -04001858 RAMBlock *block;
1859 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001860
Mike Day0dc3f442013-09-05 14:41:35 -04001861 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001862 block = qemu_get_ram_block(addr);
1863 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001864 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001865 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001866}
1867
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001868/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001869 * This should not be used for general purpose DMA. Use address_space_map
1870 * or address_space_rw instead. For local memory (e.g. video ram) that the
1871 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001872 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001873 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001874 */
Gonglei3655cb92016-02-20 10:35:20 +08001875void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001876{
Gonglei3655cb92016-02-20 10:35:20 +08001877 RAMBlock *block = ram_block;
1878
1879 if (block == NULL) {
1880 block = qemu_get_ram_block(addr);
1881 }
Mike Dayae3a7042013-09-05 14:41:35 -04001882
1883 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001884 /* We need to check if the requested address is in the RAM
1885 * because we don't want to map the entire memory in QEMU.
1886 * In that case just map until the end of the page.
1887 */
1888 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001889 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001890 }
Mike Dayae3a7042013-09-05 14:41:35 -04001891
1892 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001893 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001894 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001895}
1896
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001897/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001898 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001899 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001900 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001901 */
Gonglei3655cb92016-02-20 10:35:20 +08001902static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1903 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001904{
Gonglei3655cb92016-02-20 10:35:20 +08001905 RAMBlock *block = ram_block;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001906 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001907 if (*size == 0) {
1908 return NULL;
1909 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001910
Gonglei3655cb92016-02-20 10:35:20 +08001911 if (block == NULL) {
1912 block = qemu_get_ram_block(addr);
1913 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001914 offset_inside_block = addr - block->offset;
1915 *size = MIN(*size, block->max_length - offset_inside_block);
1916
1917 if (xen_enabled() && block->host == NULL) {
1918 /* We need to check if the requested address is in the RAM
1919 * because we don't want to map the entire memory in QEMU.
1920 * In that case just map the requested area.
1921 */
1922 if (block->offset == 0) {
1923 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001924 }
1925
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001926 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001927 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001928
1929 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001930}
1931
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001932/*
1933 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1934 * in that RAMBlock.
1935 *
1936 * ptr: Host pointer to look up
1937 * round_offset: If true round the result offset down to a page boundary
1938 * *ram_addr: set to result ram_addr
1939 * *offset: set to result offset within the RAMBlock
1940 *
1941 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001942 *
1943 * By the time this function returns, the returned pointer is not protected
1944 * by RCU anymore. If the caller is not within an RCU critical section and
1945 * does not hold the iothread lock, it must have other means of protecting the
1946 * pointer, such as a reference to the region that includes the incoming
1947 * ram_addr_t.
1948 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001949RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1950 ram_addr_t *ram_addr,
1951 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001952{
pbrook94a6b542009-04-11 17:15:54 +00001953 RAMBlock *block;
1954 uint8_t *host = ptr;
1955
Jan Kiszka868bb332011-06-21 22:59:09 +02001956 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001957 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001958 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001959 block = qemu_get_ram_block(*ram_addr);
1960 if (block) {
1961 *offset = (host - block->host);
1962 }
Mike Day0dc3f442013-09-05 14:41:35 -04001963 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001964 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001965 }
1966
Mike Day0dc3f442013-09-05 14:41:35 -04001967 rcu_read_lock();
1968 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001969 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001970 goto found;
1971 }
1972
Mike Day0dc3f442013-09-05 14:41:35 -04001973 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001974 /* This case happens when the block is not mapped. */
1975 if (block->host == NULL) {
1976 continue;
1977 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001978 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001979 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001980 }
pbrook94a6b542009-04-11 17:15:54 +00001981 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001982
Mike Day0dc3f442013-09-05 14:41:35 -04001983 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001984 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001985
1986found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001987 *offset = (host - block->host);
1988 if (round_offset) {
1989 *offset &= TARGET_PAGE_MASK;
1990 }
1991 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001992 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001993 return block;
1994}
1995
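/*
 * Usage sketch (hypothetical caller):
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true,
 *                                             &ram_addr, &offset);
 *     if (rb) {
 *         // offset was rounded down to a page boundary because
 *         // round_offset was true.
 *     }
 */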
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001996/*
1997 * Finds the named RAMBlock
1998 *
1999 * name: The name of RAMBlock to find
2000 *
2001 * Returns: RAMBlock (or NULL if not found)
2002 */
2003RAMBlock *qemu_ram_block_by_name(const char *name)
2004{
2005 RAMBlock *block;
2006
2007 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2008 if (!strcmp(name, block->idstr)) {
2009 return block;
2010 }
2011 }
2012
2013 return NULL;
2014}
2015
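/*
 * Usage sketch (hypothetical caller): the list is walked with
 * QLIST_FOREACH_RCU(), so the caller is expected to hold the RCU read
 * lock (or otherwise pin the list):
 *
 *     rcu_read_lock();
 *     RAMBlock *rb = qemu_ram_block_by_name("pc.ram");
 *     if (rb) {
 *         ...
 *     }
 *     rcu_read_unlock();
 *
 * "pc.ram" is just an example idstr.
 */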
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002016/* Some of the softmmu routines need to translate from a host pointer
2017 (typically a TLB entry) back to a ram offset. */
2018MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2019{
2020 RAMBlock *block;
2021 ram_addr_t offset; /* Not used */
2022
2023 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2024
2025 if (!block) {
2026 return NULL;
2027 }
2028
2029 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002030}
Alex Williamsonf471a172010-06-11 11:11:42 -06002031
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002032/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002033static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002034 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002035{
Juan Quintela52159192013-10-08 12:44:04 +02002036 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002037 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002038 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002039 switch (size) {
2040 case 1:
Gonglei3655cb92016-02-20 10:35:20 +08002041 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002042 break;
2043 case 2:
Gonglei3655cb92016-02-20 10:35:20 +08002044 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002045 break;
2046 case 4:
Gonglei3655cb92016-02-20 10:35:20 +08002047 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002048 break;
2049 default:
2050 abort();
2051 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002052 /* Set both VGA and migration bits for simplicity and to remove
2053 * the notdirty callback faster.
2054 */
2055 cpu_physical_memory_set_dirty_range(ram_addr, size,
2056 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002057 /* we remove the notdirty callback only if the code has been
2058 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002059 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002060 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002061 }
bellard1ccde1c2004-02-06 19:46:14 +00002062}
2063
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002064static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2065 unsigned size, bool is_write)
2066{
2067 return is_write;
2068}
2069
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002070static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002071 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002072 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002073 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002074};
2075
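/*
 * Flow sketch (assuming the usual TLB_NOTDIRTY scheme; the TLB side is
 * not in this file): while a RAM page still has clean dirty bits, guest
 * stores are routed through notdirty_mem_write() above, roughly:
 *
 *     guest store -> notdirty_mem_write()
 *         -> tb_invalidate_phys_page_fast()  if translated code may
 *                                            live in the page
 *         -> stb/stw/stl_p() into the RAM block
 *         -> cpu_physical_memory_set_dirty_range()
 *         -> tlb_set_dirty()  once the page is dirty for all clients,
 *                             so later stores go straight to RAM
 */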
pbrook0f459d12008-06-09 00:20:13 +00002076/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002077static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002078{
Andreas Färber93afead2013-08-26 03:41:01 +02002079 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002080 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002081 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002082 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002083 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002084 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002085 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002086
Andreas Färberff4700b2013-08-26 18:23:18 +02002087 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002088 /* We re-entered the check after replacing the TB. Now raise
 2089 * the debug interrupt so that it will trigger after the
2090 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002091 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002092 return;
2093 }
Andreas Färber93afead2013-08-26 03:41:01 +02002094 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002095 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002096 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2097 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002098 if (flags == BP_MEM_READ) {
2099 wp->flags |= BP_WATCHPOINT_HIT_READ;
2100 } else {
2101 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2102 }
2103 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002104 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002105 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002106 if (wp->flags & BP_CPU &&
2107 !cc->debug_check_watchpoint(cpu, wp)) {
2108 wp->flags &= ~BP_WATCHPOINT_HIT;
2109 continue;
2110 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002111 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002112 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002113 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002114 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002115 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002116 } else {
2117 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002118 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002119 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002120 }
aliguori06d55cc2008-11-18 20:24:06 +00002121 }
aliguori6e140f22008-11-18 20:37:55 +00002122 } else {
2123 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002124 }
2125 }
2126}
2127
pbrook6658ffb2007-03-16 23:58:11 +00002128/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2129 so these check for a hit then pass through to the normal out-of-line
2130 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002131static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2132 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002133{
Peter Maydell66b9b432015-04-26 16:49:24 +01002134 MemTxResult res;
2135 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002136 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2137 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002138
Peter Maydell66b9b432015-04-26 16:49:24 +01002139 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002140 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002141 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002142 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002143 break;
2144 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002145 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002146 break;
2147 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002148 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002149 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002150 default: abort();
2151 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002152 *pdata = data;
2153 return res;
2154}
2155
2156static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2157 uint64_t val, unsigned size,
2158 MemTxAttrs attrs)
2159{
2160 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002161 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2162 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002163
2164 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2165 switch (size) {
2166 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002167 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002168 break;
2169 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002170 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002171 break;
2172 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002173 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002174 break;
2175 default: abort();
2176 }
2177 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002178}
2179
Avi Kivity1ec9b902012-01-02 12:47:48 +02002180static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002181 .read_with_attrs = watch_mem_read,
2182 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002183 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002184};
pbrook6658ffb2007-03-16 23:58:11 +00002185
Peter Maydellf25a49e2015-04-26 16:49:24 +01002186static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2187 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002188{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002189 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002190 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002191 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002192
blueswir1db7b5422007-05-26 17:36:03 +00002193#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002194 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002195 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002196#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002197 res = address_space_read(subpage->as, addr + subpage->base,
2198 attrs, buf, len);
2199 if (res) {
2200 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002201 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002202 switch (len) {
2203 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002204 *data = ldub_p(buf);
2205 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002206 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002207 *data = lduw_p(buf);
2208 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002209 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002210 *data = ldl_p(buf);
2211 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002212 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002213 *data = ldq_p(buf);
2214 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002215 default:
2216 abort();
2217 }
blueswir1db7b5422007-05-26 17:36:03 +00002218}
2219
Peter Maydellf25a49e2015-04-26 16:49:24 +01002220static MemTxResult subpage_write(void *opaque, hwaddr addr,
2221 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002222{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002223 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002224 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002225
blueswir1db7b5422007-05-26 17:36:03 +00002226#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002227 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002228 " value %"PRIx64"\n",
2229 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002230#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002231 switch (len) {
2232 case 1:
2233 stb_p(buf, value);
2234 break;
2235 case 2:
2236 stw_p(buf, value);
2237 break;
2238 case 4:
2239 stl_p(buf, value);
2240 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002241 case 8:
2242 stq_p(buf, value);
2243 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002244 default:
2245 abort();
2246 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002247 return address_space_write(subpage->as, addr + subpage->base,
2248 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002249}
2250
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002251static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002252 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002253{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002254 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002255#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002256 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002257 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002258#endif
2259
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002260 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002261 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002262}
2263
Avi Kivity70c68e42012-01-02 12:32:48 +02002264static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002265 .read_with_attrs = subpage_read,
2266 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002267 .impl.min_access_size = 1,
2268 .impl.max_access_size = 8,
2269 .valid.min_access_size = 1,
2270 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002271 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002272 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002273};
2274
Anthony Liguoric227f092009-10-01 16:12:16 -05002275static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002276 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002277{
2278 int idx, eidx;
2279
2280 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2281 return -1;
2282 idx = SUBPAGE_IDX(start);
2283 eidx = SUBPAGE_IDX(end);
2284#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002285 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2286 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002287#endif
blueswir1db7b5422007-05-26 17:36:03 +00002288 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002289 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002290 }
2291
2292 return 0;
2293}
2294
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002295static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002296{
Anthony Liguoric227f092009-10-01 16:12:16 -05002297 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002298
Anthony Liguori7267c092011-08-20 22:09:37 -05002299 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002300
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002301 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002302 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002303 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002304 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002305 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002306#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002307 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2308 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002309#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002310 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002311
2312 return mmio;
2313}
2314
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002315static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2316 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002317{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002318 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002319 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002320 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002321 .mr = mr,
2322 .offset_within_address_space = 0,
2323 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002324 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002325 };
2326
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002327 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002328}
2329
Peter Maydella54c87b2016-01-21 14:15:05 +00002330MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002331{
Peter Maydella54c87b2016-01-21 14:15:05 +00002332 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2333 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002334 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002335 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002336
2337 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002338}
2339
Avi Kivitye9179ce2009-06-14 11:38:52 +03002340static void io_mem_init(void)
2341{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002342 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002343 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002344 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002345 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002346 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002347 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002348 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002349}
2350
Avi Kivityac1970f2012-10-03 16:22:53 +02002351static void mem_begin(MemoryListener *listener)
2352{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002353 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002354 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2355 uint16_t n;
2356
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002357 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002358 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002359 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002360 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002361 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002362 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002363 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002364 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002365
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002366 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002367 d->as = as;
2368 as->next_dispatch = d;
2369}
2370
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002371static void address_space_dispatch_free(AddressSpaceDispatch *d)
2372{
2373 phys_sections_free(&d->map);
2374 g_free(d);
2375}
2376
Paolo Bonzini00752702013-05-29 12:13:54 +02002377static void mem_commit(MemoryListener *listener)
2378{
2379 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002380 AddressSpaceDispatch *cur = as->dispatch;
2381 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002382
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002383 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002384
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002385 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002386 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002387 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002388 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002389}
2390
Avi Kivity1d711482012-10-02 18:54:45 +02002391static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002392{
Peter Maydell32857f42015-10-01 15:29:50 +01002393 CPUAddressSpace *cpuas;
2394 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002395
2396 /* since each CPU stores ram addresses in its TLB cache, we must
2397 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002398 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2399 cpu_reloading_memory_map();
2400 /* The CPU and TLB are protected by the iothread lock.
2401 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2402 * may have split the RCU critical section.
2403 */
2404 d = atomic_rcu_read(&cpuas->as->dispatch);
2405 cpuas->memory_dispatch = d;
2406 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002407}
2408
Avi Kivityac1970f2012-10-03 16:22:53 +02002409void address_space_init_dispatch(AddressSpace *as)
2410{
Paolo Bonzini00752702013-05-29 12:13:54 +02002411 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002412 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002413 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002414 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002415 .region_add = mem_add,
2416 .region_nop = mem_add,
2417 .priority = 0,
2418 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002419 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002420}
2421
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002422void address_space_unregister(AddressSpace *as)
2423{
2424 memory_listener_unregister(&as->dispatch_listener);
2425}
2426
Avi Kivity83f3c252012-10-07 12:59:55 +02002427void address_space_destroy_dispatch(AddressSpace *as)
2428{
2429 AddressSpaceDispatch *d = as->dispatch;
2430
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002431 atomic_rcu_set(&as->dispatch, NULL);
2432 if (d) {
2433 call_rcu(d, address_space_dispatch_free, rcu);
2434 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002435}
2436
Avi Kivity62152b82011-07-26 14:26:14 +03002437static void memory_map_init(void)
2438{
Anthony Liguori7267c092011-08-20 22:09:37 -05002439 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002440
Paolo Bonzini57271d62013-11-07 17:14:37 +01002441 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002442 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002443
Anthony Liguori7267c092011-08-20 22:09:37 -05002444 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002445 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2446 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002447 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002448}
2449
2450MemoryRegion *get_system_memory(void)
2451{
2452 return system_memory;
2453}
2454
Avi Kivity309cb472011-08-08 16:09:03 +03002455MemoryRegion *get_system_io(void)
2456{
2457 return system_io;
2458}
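/*
 * Editor's sketch (not part of the original file): board code typically
 * hangs guest RAM off the tree that memory_map_init() created.
 * "board_init_ram" is hypothetical; memory_region_init_ram(),
 * memory_region_add_subregion() and error_fatal are the standard memory
 * and error APIs of this QEMU generation.
 */
static void board_init_ram(MemoryRegion *ram, ram_addr_t size)
{
    memory_region_init_ram(ram, NULL, "board.ram", size, &error_fatal);
    /* make it visible to the guest at physical address 0 */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}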
2459
pbrooke2eef172008-06-08 01:09:01 +00002460#endif /* !defined(CONFIG_USER_ONLY) */
2461
bellard13eb76e2004-01-24 15:23:36 +00002462/* physical memory access (slow version, mainly for debug) */
2463#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002464int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002465 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002466{
2467 int l, flags;
2468 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002469 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002470
2471 while (len > 0) {
2472 page = addr & TARGET_PAGE_MASK;
2473 l = (page + TARGET_PAGE_SIZE) - addr;
2474 if (l > len)
2475 l = len;
2476 flags = page_get_flags(page);
2477 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002478 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002479 if (is_write) {
2480 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002481 return -1;
bellard579a97f2007-11-11 14:26:47 +00002482 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002483 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002484 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002485 memcpy(p, buf, l);
2486 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002487 } else {
2488 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002489 return -1;
bellard579a97f2007-11-11 14:26:47 +00002490 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002491 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002492 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002493 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002494 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002495 }
2496 len -= l;
2497 buf += l;
2498 addr += l;
2499 }
Paul Brooka68fe892010-03-01 00:08:59 +00002500 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002501}
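/*
 * Editor's sketch (not part of the original file): debugger-style use of
 * cpu_memory_rw_debug(), e.g. from a gdbstub watch expression.  The
 * helper name is hypothetical; ldl_p() is the same target-endian load
 * used by the slow path above.
 */
static int debug_peek_u32(CPUState *cpu, target_ulong addr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, addr, buf, sizeof(buf), 0) < 0) {
        return -1;          /* page unmapped or not readable */
    }
    *out = ldl_p(buf);
    return 0;
}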
bellard8df1cd02005-01-28 22:37:22 +00002502
bellard13eb76e2004-01-24 15:23:36 +00002503#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002504
Paolo Bonzini845b6212015-03-23 11:45:53 +01002505static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002506 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002507{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002508 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2509 /* No early return if dirty_log_mask is or becomes 0, because
2510 * cpu_physical_memory_set_dirty_range will still call
2511 * xen_modified_memory.
2512 */
2513 if (dirty_log_mask) {
2514 dirty_log_mask =
2515 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002516 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002517 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2518 tb_invalidate_phys_range(addr, addr + length);
2519 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2520 }
2521 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002522}
2523
Richard Henderson23326162013-07-08 14:55:59 -07002524static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002525{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002526 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002527
2528 /* Regions are assumed to support 1-4 byte accesses unless
2529 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002530 if (access_size_max == 0) {
2531 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002532 }
Richard Henderson23326162013-07-08 14:55:59 -07002533
2534 /* Bound the maximum access by the alignment of the address. */
2535 if (!mr->ops->impl.unaligned) {
2536 unsigned align_size_max = addr & -addr;
2537 if (align_size_max != 0 && align_size_max < access_size_max) {
2538 access_size_max = align_size_max;
2539 }
2540 }
2541
2542 /* Don't attempt accesses larger than the maximum. */
2543 if (l > access_size_max) {
2544 l = access_size_max;
2545 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002546 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002547
2548 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002549}
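/*
 * Editor's worked example (not part of the original file): a plain-C
 * mirror of the clamping above.  For addr = 0x1006, addr & -addr == 2,
 * so even on a region whose valid.max_access_size is 8 the access is
 * limited to 2 bytes; pow2floor() then rounds any remaining odd length
 * down to a power of two.
 */
static unsigned clamp_by_alignment(uint64_t addr, unsigned l, unsigned max)
{
    unsigned align = (unsigned)(addr & -addr); /* lowest set bit of addr */

    if (align != 0 && align < max) {
        max = align;
    }
    return l < max ? l : max;
}
/* clamp_by_alignment(0x1006, 8, 8) == 2 */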
2550
Jan Kiszka4840f102015-06-18 18:47:22 +02002551static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002552{
Jan Kiszka4840f102015-06-18 18:47:22 +02002553 bool unlocked = !qemu_mutex_iothread_locked();
2554 bool release_lock = false;
2555
2556 if (unlocked && mr->global_locking) {
2557 qemu_mutex_lock_iothread();
2558 unlocked = false;
2559 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002560 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002561 if (mr->flush_coalesced_mmio) {
2562 if (unlocked) {
2563 qemu_mutex_lock_iothread();
2564 }
2565 qemu_flush_coalesced_mmio_buffer();
2566 if (unlocked) {
2567 qemu_mutex_unlock_iothread();
2568 }
2569 }
2570
2571 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002572}
2573
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002574/* Called within RCU critical section. */
2575static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2576 MemTxAttrs attrs,
2577 const uint8_t *buf,
2578 int len, hwaddr addr1,
2579 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002580{
bellard13eb76e2004-01-24 15:23:36 +00002581 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002582 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002583 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002584 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002585
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002586 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002587 if (!memory_access_is_direct(mr, true)) {
2588 release_lock |= prepare_mmio_access(mr);
2589 l = memory_access_size(mr, l, addr1);
2590 /* XXX: could force current_cpu to NULL to avoid
2591 potential bugs */
2592 switch (l) {
2593 case 8:
2594 /* 64 bit write access */
2595 val = ldq_p(buf);
2596 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2597 attrs);
2598 break;
2599 case 4:
2600 /* 32 bit write access */
2601 val = ldl_p(buf);
2602 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2603 attrs);
2604 break;
2605 case 2:
2606 /* 16 bit write access */
2607 val = lduw_p(buf);
2608 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2609 attrs);
2610 break;
2611 case 1:
2612 /* 8 bit write access */
2613 val = ldub_p(buf);
2614 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2615 attrs);
2616 break;
2617 default:
2618 abort();
bellard13eb76e2004-01-24 15:23:36 +00002619 }
2620 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002621 addr1 += memory_region_get_ram_addr(mr);
2622 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002623 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002624 memcpy(ptr, buf, l);
2625 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002626 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002627
2628 if (release_lock) {
2629 qemu_mutex_unlock_iothread();
2630 release_lock = false;
2631 }
2632
bellard13eb76e2004-01-24 15:23:36 +00002633 len -= l;
2634 buf += l;
2635 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002636
2637 if (!len) {
2638 break;
2639 }
2640
2641 l = len;
2642 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002643 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002644
Peter Maydell3b643492015-04-26 16:49:23 +01002645 return result;
bellard13eb76e2004-01-24 15:23:36 +00002646}
bellard8df1cd02005-01-28 22:37:22 +00002647
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002648MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2649 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002650{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002651 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002652 hwaddr addr1;
2653 MemoryRegion *mr;
2654 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002655
2656 if (len > 0) {
2657 rcu_read_lock();
2658 l = len;
2659 mr = address_space_translate(as, addr, &addr1, &l, true);
2660 result = address_space_write_continue(as, addr, attrs, buf, len,
2661 addr1, l, mr);
2662 rcu_read_unlock();
2663 }
2664
2665 return result;
2666}
2667
2668/* Called within RCU critical section. */
2669MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2670 MemTxAttrs attrs, uint8_t *buf,
2671 int len, hwaddr addr1, hwaddr l,
2672 MemoryRegion *mr)
2673{
2674 uint8_t *ptr;
2675 uint64_t val;
2676 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002677 bool release_lock = false;
2678
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002679 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002680 if (!memory_access_is_direct(mr, false)) {
2681 /* I/O case */
2682 release_lock |= prepare_mmio_access(mr);
2683 l = memory_access_size(mr, l, addr1);
2684 switch (l) {
2685 case 8:
2686 /* 64 bit read access */
2687 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2688 attrs);
2689 stq_p(buf, val);
2690 break;
2691 case 4:
2692 /* 32 bit read access */
2693 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2694 attrs);
2695 stl_p(buf, val);
2696 break;
2697 case 2:
2698 /* 16 bit read access */
2699 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2700 attrs);
2701 stw_p(buf, val);
2702 break;
2703 case 1:
2704 /* 8 bit read access */
2705 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2706 attrs);
2707 stb_p(buf, val);
2708 break;
2709 default:
2710 abort();
2711 }
2712 } else {
2713 /* RAM case */
Fam Zheng8e41fb62016-03-01 14:18:21 +08002714 ptr = qemu_get_ram_ptr(mr->ram_block,
2715 memory_region_get_ram_addr(mr) + addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002716 memcpy(buf, ptr, l);
2717 }
2718
2719 if (release_lock) {
2720 qemu_mutex_unlock_iothread();
2721 release_lock = false;
2722 }
2723
2724 len -= l;
2725 buf += l;
2726 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002727
2728 if (!len) {
2729 break;
2730 }
2731
2732 l = len;
2733 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002734 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002735
2736 return result;
2737}
2738
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002739MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2740 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002741{
2742 hwaddr l;
2743 hwaddr addr1;
2744 MemoryRegion *mr;
2745 MemTxResult result = MEMTX_OK;
2746
2747 if (len > 0) {
2748 rcu_read_lock();
2749 l = len;
2750 mr = address_space_translate(as, addr, &addr1, &l, false);
2751 result = address_space_read_continue(as, addr, attrs, buf, len,
2752 addr1, l, mr);
2753 rcu_read_unlock();
2754 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002755
2756 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002757}
2758
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002759MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2760 uint8_t *buf, int len, bool is_write)
2761{
2762 if (is_write) {
2763 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2764 } else {
2765 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2766 }
2767}
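/*
 * Editor's sketch (not part of the original file): how a DMA-capable
 * device would drive the API above.  "dma_read_descriptor" and its
 * descriptor buffer are hypothetical; address_space_read(),
 * MEMTXATTRS_UNSPECIFIED and the MemTxResult codes are the real API.
 */
static bool dma_read_descriptor(AddressSpace *as, hwaddr desc_addr,
                                uint8_t *desc, int desc_len)
{
    MemTxResult res = address_space_read(as, desc_addr,
                                         MEMTXATTRS_UNSPECIFIED,
                                         desc, desc_len);

    return res == MEMTX_OK; /* otherwise MEMTX_ERROR or MEMTX_DECODE_ERROR */
}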
Avi Kivityac1970f2012-10-03 16:22:53 +02002768
Avi Kivitya8170e52012-10-23 12:30:10 +02002769void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002770 int len, int is_write)
2771{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002772 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2773 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002774}
2775
Alexander Graf582b55a2013-12-11 14:17:44 +01002776enum write_rom_type {
2777 WRITE_DATA,
2778 FLUSH_CACHE,
2779};
2780
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002781static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002782 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002783{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002784 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002785 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002786 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002787 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002788
Paolo Bonzini41063e12015-03-18 14:21:43 +01002789 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002790 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002791 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002792 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002793
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002794 if (!(memory_region_is_ram(mr) ||
2795 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002796 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002797 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002798 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002799 /* ROM/RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002800 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002801 switch (type) {
2802 case WRITE_DATA:
2803 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002804 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002805 break;
2806 case FLUSH_CACHE:
2807 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2808 break;
2809 }
bellardd0ecd2a2006-04-23 17:14:48 +00002810 }
2811 len -= l;
2812 buf += l;
2813 addr += l;
2814 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002815 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002816}
2817
Alexander Graf582b55a2013-12-11 14:17:44 +01002818/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002819void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002820 const uint8_t *buf, int len)
2821{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002822 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002823}
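/*
 * Editor's sketch (not part of the original file): loading a firmware
 * image at board setup.  "board_load_firmware" and the 0xfffc0000 base
 * are hypothetical; the call works even though the destination region
 * is read-only from the guest's point of view.
 */
static void board_load_firmware(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, blob_size);
}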
2824
2825void cpu_flush_icache_range(hwaddr start, int len)
2826{
2827 /*
2828 * This function should do the same thing as an icache flush that was
2829 * triggered from within the guest. For TCG we are always cache coherent,
2830 * so there is no need to flush anything. For KVM / Xen we need to flush
2831 * the host's instruction cache at least.
2832 */
2833 if (tcg_enabled()) {
2834 return;
2835 }
2836
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002837 cpu_physical_memory_write_rom_internal(&address_space_memory,
2838 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002839}
2840
aliguori6d16c2f2009-01-22 16:59:11 +00002841typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002842 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002843 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002844 hwaddr addr;
2845 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002846 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002847} BounceBuffer;
2848
2849static BounceBuffer bounce;
2850
aliguoriba223c22009-01-22 16:59:16 +00002851typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002852 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002853 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002854} MapClient;
2855
Fam Zheng38e047b2015-03-16 17:03:35 +08002856QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002857static QLIST_HEAD(map_client_list, MapClient) map_client_list
2858 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002859
Fam Zhenge95205e2015-03-16 17:03:37 +08002860static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002861{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002862 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002863 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002864}
2865
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002866static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002867{
2868 MapClient *client;
2869
Blue Swirl72cf2d42009-09-12 07:36:22 +00002870 while (!QLIST_EMPTY(&map_client_list)) {
2871 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002872 qemu_bh_schedule(client->bh);
2873 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002874 }
2875}
2876
Fam Zhenge95205e2015-03-16 17:03:37 +08002877void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002878{
2879 MapClient *client = g_malloc(sizeof(*client));
2880
Fam Zheng38e047b2015-03-16 17:03:35 +08002881 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002882 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002883 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002884 if (!atomic_read(&bounce.in_use)) {
2885 cpu_notify_map_clients_locked();
2886 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002887 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002888}
2889
Fam Zheng38e047b2015-03-16 17:03:35 +08002890void cpu_exec_init_all(void)
2891{
2892 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002893 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002894 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002895 qemu_mutex_init(&map_client_list_lock);
2896}
2897
Fam Zhenge95205e2015-03-16 17:03:37 +08002898void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002899{
Fam Zhenge95205e2015-03-16 17:03:37 +08002900 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002901
Fam Zhenge95205e2015-03-16 17:03:37 +08002902 qemu_mutex_lock(&map_client_list_lock);
2903 QLIST_FOREACH(client, &map_client_list, link) {
2904 if (client->bh == bh) {
2905 cpu_unregister_map_client_do(client);
2906 break;
2907 }
2908 }
2909 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002910}
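/*
 * Editor's sketch (not part of the original file): the retry pattern the
 * map-client API above is designed for.  When address_space_map() fails
 * because the single bounce buffer is in use, queue a bottom half that
 * fires once the buffer is released.  "dma_retry_cb"/"dma_try_map" are
 * hypothetical; qemu_bh_new() is the standard bottom-half constructor.
 */
static void dma_retry_cb(void *opaque)
{
    /* re-issue the transfer; the details are device-specific */
}

static void *dma_try_map(AddressSpace *as, hwaddr addr, hwaddr *plen,
                         bool is_write, QEMUBH **retry_bh, void *opaque)
{
    void *p = address_space_map(as, addr, plen, is_write);

    if (!p) {
        if (!*retry_bh) {
            *retry_bh = qemu_bh_new(dma_retry_cb, opaque);
        }
        cpu_register_map_client(*retry_bh);
    }
    return p;
}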
2911
2912static void cpu_notify_map_clients(void)
2913{
Fam Zheng38e047b2015-03-16 17:03:35 +08002914 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002915 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002916 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002917}
2918
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002919bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2920{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002921 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002922 hwaddr l, xlat;
2923
Paolo Bonzini41063e12015-03-18 14:21:43 +01002924 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002925 while (len > 0) {
2926 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002927 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2928 if (!memory_access_is_direct(mr, is_write)) {
2929 l = memory_access_size(mr, l, addr);
2930 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002931 return false;
2932 }
2933 }
2934
2935 len -= l;
2936 addr += l;
2937 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002938 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002939 return true;
2940}
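/*
 * Editor's sketch (not part of the original file): probing a range
 * before a large transfer so a device can fail cleanly instead of
 * performing a partial write.  "checked_write" is hypothetical.
 */
static MemTxResult checked_write(AddressSpace *as, hwaddr addr,
                                 const uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, addr, len, true)) {
        return MEMTX_DECODE_ERROR;  /* nothing claims this range */
    }
    return address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, buf, len);
}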
2941
aliguori6d16c2f2009-01-22 16:59:11 +00002942/* Map a physical memory region into a host virtual address.
2943 * May map a subset of the requested range, given by and returned in *plen.
2944 * May return NULL if resources needed to perform the mapping are exhausted.
2945 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002946 * Use cpu_register_map_client() to know when retrying the map operation is
2947 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002948 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002949void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002950 hwaddr addr,
2951 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002952 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002953{
Avi Kivitya8170e52012-10-23 12:30:10 +02002954 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002955 hwaddr done = 0;
2956 hwaddr l, xlat, base;
2957 MemoryRegion *mr, *this_mr;
2958 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002959 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002960
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002961 if (len == 0) {
2962 return NULL;
2963 }
aliguori6d16c2f2009-01-22 16:59:11 +00002964
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002965 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002966 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002967 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002968
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002969 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002970 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002971 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002972 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002973 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002974 /* Avoid unbounded allocations */
2975 l = MIN(l, TARGET_PAGE_SIZE);
2976 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002977 bounce.addr = addr;
2978 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002979
2980 memory_region_ref(mr);
2981 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002982 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002983 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2984 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002985 }
aliguori6d16c2f2009-01-22 16:59:11 +00002986
Paolo Bonzini41063e12015-03-18 14:21:43 +01002987 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002988 *plen = l;
2989 return bounce.buffer;
2990 }
2991
2992 base = xlat;
2993 raddr = memory_region_get_ram_addr(mr);
2994
2995 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002996 len -= l;
2997 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002998 done += l;
2999 if (len == 0) {
3000 break;
3001 }
3002
3003 l = len;
3004 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3005 if (this_mr != mr || xlat != base + done) {
3006 break;
3007 }
aliguori6d16c2f2009-01-22 16:59:11 +00003008 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003009
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003010 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003011 *plen = done;
Gonglei3655cb92016-02-20 10:35:20 +08003012 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01003013 rcu_read_unlock();
3014
3015 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003016}
3017
Avi Kivityac1970f2012-10-03 16:22:53 +02003018/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003019 * Will also mark the memory as dirty if is_write == 1. access_len gives
3020 * the amount of memory that was actually read or written by the caller.
3021 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003022void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3023 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003024{
3025 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003026 MemoryRegion *mr;
3027 ram_addr_t addr1;
3028
3029 mr = qemu_ram_addr_from_host(buffer, &addr1);
3030 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003031 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003032 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003033 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003034 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003035 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003036 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003037 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003038 return;
3039 }
3040 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003041 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3042 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003043 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003044 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003045 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003046 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003047 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003048 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003049}
bellardd0ecd2a2006-04-23 17:14:48 +00003050
Avi Kivitya8170e52012-10-23 12:30:10 +02003051void *cpu_physical_memory_map(hwaddr addr,
3052 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003053 int is_write)
3054{
3055 return address_space_map(&address_space_memory, addr, plen, is_write);
3056}
3057
Avi Kivitya8170e52012-10-23 12:30:10 +02003058void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3059 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003060{
3061 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3062}
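/*
 * Editor's sketch (not part of the original file): the canonical
 * map -> use -> unmap sequence for zero-copy access.  *plen can come
 * back smaller than requested (or the map can fail and bounce), so real
 * callers loop; this minimal version, "zero_fill_guest", just gives up.
 */
static bool zero_fill_guest(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        return false;
    }
    if (plen < len) {
        address_space_unmap(as, host, plen, true, 0);   /* wrote nothing */
        return false;
    }
    memset(host, 0, len);
    address_space_unmap(as, host, len, true, len);      /* dirties the range */
    return true;
}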
3063
bellard8df1cd02005-01-28 22:37:22 +00003064/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003065static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3066 MemTxAttrs attrs,
3067 MemTxResult *result,
3068 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003069{
bellard8df1cd02005-01-28 22:37:22 +00003070 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003071 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003072 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003073 hwaddr l = 4;
3074 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003075 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003076 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003077
Paolo Bonzini41063e12015-03-18 14:21:43 +01003078 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003079 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003080 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003081 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003082
bellard8df1cd02005-01-28 22:37:22 +00003083 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003084 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003085#if defined(TARGET_WORDS_BIGENDIAN)
3086 if (endian == DEVICE_LITTLE_ENDIAN) {
3087 val = bswap32(val);
3088 }
3089#else
3090 if (endian == DEVICE_BIG_ENDIAN) {
3091 val = bswap32(val);
3092 }
3093#endif
bellard8df1cd02005-01-28 22:37:22 +00003094 } else {
3095 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003096 ptr = qemu_get_ram_ptr(mr->ram_block,
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003097 memory_region_get_ram_addr(mr) + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003098 switch (endian) {
3099 case DEVICE_LITTLE_ENDIAN:
3100 val = ldl_le_p(ptr);
3101 break;
3102 case DEVICE_BIG_ENDIAN:
3103 val = ldl_be_p(ptr);
3104 break;
3105 default:
3106 val = ldl_p(ptr);
3107 break;
3108 }
Peter Maydell50013112015-04-26 16:49:24 +01003109 r = MEMTX_OK;
3110 }
3111 if (result) {
3112 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003113 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003114 if (release_lock) {
3115 qemu_mutex_unlock_iothread();
3116 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003117 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003118 return val;
3119}
3120
Peter Maydell50013112015-04-26 16:49:24 +01003121uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3122 MemTxAttrs attrs, MemTxResult *result)
3123{
3124 return address_space_ldl_internal(as, addr, attrs, result,
3125 DEVICE_NATIVE_ENDIAN);
3126}
3127
3128uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3129 MemTxAttrs attrs, MemTxResult *result)
3130{
3131 return address_space_ldl_internal(as, addr, attrs, result,
3132 DEVICE_LITTLE_ENDIAN);
3133}
3134
3135uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3136 MemTxAttrs attrs, MemTxResult *result)
3137{
3138 return address_space_ldl_internal(as, addr, attrs, result,
3139 DEVICE_BIG_ENDIAN);
3140}
3141
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003142uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003143{
Peter Maydell50013112015-04-26 16:49:24 +01003144 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003145}
3146
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003147uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003148{
Peter Maydell50013112015-04-26 16:49:24 +01003149 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003150}
3151
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003152uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003153{
Peter Maydell50013112015-04-26 16:49:24 +01003154 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003155}
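/*
 * Editor's sketch (not part of the original file): reading a 32-bit
 * little-endian register with explicit error checking, rather than the
 * convenience wrappers above that discard the MemTxResult.
 * "read_reg32_le" is hypothetical.
 */
static bool read_reg32_le(AddressSpace *as, hwaddr addr, uint32_t *out)
{
    MemTxResult r;

    *out = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, &r);
    return r == MEMTX_OK;
}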
3156
bellard84b7b8e2005-11-28 21:19:04 +00003157/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003158static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3159 MemTxAttrs attrs,
3160 MemTxResult *result,
3161 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003162{
bellard84b7b8e2005-11-28 21:19:04 +00003163 uint8_t *ptr;
3164 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003165 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003166 hwaddr l = 8;
3167 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003168 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003169 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003170
Paolo Bonzini41063e12015-03-18 14:21:43 +01003171 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003172 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003173 false);
3174 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003175 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003176
bellard84b7b8e2005-11-28 21:19:04 +00003177 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003178 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003179#if defined(TARGET_WORDS_BIGENDIAN)
3180 if (endian == DEVICE_LITTLE_ENDIAN) {
3181 val = bswap64(val);
3182 }
3183#else
3184 if (endian == DEVICE_BIG_ENDIAN) {
3185 val = bswap64(val);
3186 }
3187#endif
bellard84b7b8e2005-11-28 21:19:04 +00003188 } else {
3189 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003190 ptr = qemu_get_ram_ptr(mr->ram_block,
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003191 memory_region_get_ram_addr(mr) + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003192 switch (endian) {
3193 case DEVICE_LITTLE_ENDIAN:
3194 val = ldq_le_p(ptr);
3195 break;
3196 case DEVICE_BIG_ENDIAN:
3197 val = ldq_be_p(ptr);
3198 break;
3199 default:
3200 val = ldq_p(ptr);
3201 break;
3202 }
Peter Maydell50013112015-04-26 16:49:24 +01003203 r = MEMTX_OK;
3204 }
3205 if (result) {
3206 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003207 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003208 if (release_lock) {
3209 qemu_mutex_unlock_iothread();
3210 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003211 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003212 return val;
3213}
3214
Peter Maydell50013112015-04-26 16:49:24 +01003215uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3216 MemTxAttrs attrs, MemTxResult *result)
3217{
3218 return address_space_ldq_internal(as, addr, attrs, result,
3219 DEVICE_NATIVE_ENDIAN);
3220}
3221
3222uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3223 MemTxAttrs attrs, MemTxResult *result)
3224{
3225 return address_space_ldq_internal(as, addr, attrs, result,
3226 DEVICE_LITTLE_ENDIAN);
3227}
3228
3229uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3230 MemTxAttrs attrs, MemTxResult *result)
3231{
3232 return address_space_ldq_internal(as, addr, attrs, result,
3233 DEVICE_BIG_ENDIAN);
3234}
3235
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003236uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003237{
Peter Maydell50013112015-04-26 16:49:24 +01003238 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003239}
3240
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003241uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003242{
Peter Maydell50013112015-04-26 16:49:24 +01003243 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003244}
3245
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003246uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003247{
Peter Maydell50013112015-04-26 16:49:24 +01003248 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003249}
3250
bellardaab33092005-10-30 20:48:42 +00003251/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003252uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3253 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003254{
3255 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003256 MemTxResult r;
3257
3258 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3259 if (result) {
3260 *result = r;
3261 }
bellardaab33092005-10-30 20:48:42 +00003262 return val;
3263}
3264
Peter Maydell50013112015-04-26 16:49:24 +01003265uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3266{
3267 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3268}
3269
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003270/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003271static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3272 hwaddr addr,
3273 MemTxAttrs attrs,
3274 MemTxResult *result,
3275 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003276{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003277 uint8_t *ptr;
3278 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003279 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003280 hwaddr l = 2;
3281 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003282 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003283 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003284
Paolo Bonzini41063e12015-03-18 14:21:43 +01003285 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003286 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003287 false);
3288 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003289 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003290
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003291 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003292 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003293#if defined(TARGET_WORDS_BIGENDIAN)
3294 if (endian == DEVICE_LITTLE_ENDIAN) {
3295 val = bswap16(val);
3296 }
3297#else
3298 if (endian == DEVICE_BIG_ENDIAN) {
3299 val = bswap16(val);
3300 }
3301#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003302 } else {
3303 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003304 ptr = qemu_get_ram_ptr(mr->ram_block,
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003305 memory_region_get_ram_addr(mr) + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003306 switch (endian) {
3307 case DEVICE_LITTLE_ENDIAN:
3308 val = lduw_le_p(ptr);
3309 break;
3310 case DEVICE_BIG_ENDIAN:
3311 val = lduw_be_p(ptr);
3312 break;
3313 default:
3314 val = lduw_p(ptr);
3315 break;
3316 }
Peter Maydell50013112015-04-26 16:49:24 +01003317 r = MEMTX_OK;
3318 }
3319 if (result) {
3320 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003321 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003322 if (release_lock) {
3323 qemu_mutex_unlock_iothread();
3324 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003325 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003326 return val;
bellardaab33092005-10-30 20:48:42 +00003327}
3328
Peter Maydell50013112015-04-26 16:49:24 +01003329uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3330 MemTxAttrs attrs, MemTxResult *result)
3331{
3332 return address_space_lduw_internal(as, addr, attrs, result,
3333 DEVICE_NATIVE_ENDIAN);
3334}
3335
3336uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3337 MemTxAttrs attrs, MemTxResult *result)
3338{
3339 return address_space_lduw_internal(as, addr, attrs, result,
3340 DEVICE_LITTLE_ENDIAN);
3341}
3342
3343uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3344 MemTxAttrs attrs, MemTxResult *result)
3345{
3346 return address_space_lduw_internal(as, addr, attrs, result,
3347 DEVICE_BIG_ENDIAN);
3348}
3349
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003350uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003351{
Peter Maydell50013112015-04-26 16:49:24 +01003352 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003353}
3354
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003355uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003356{
Peter Maydell50013112015-04-26 16:49:24 +01003357 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003358}
3359
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003360uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003361{
Peter Maydell50013112015-04-26 16:49:24 +01003362 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003363}
3364
bellard8df1cd02005-01-28 22:37:22 +00003365/* warning: addr must be aligned. The ram page is not marked as dirty
3366 and the code inside is not invalidated. It is useful if the dirty
3367 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003368void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3369 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003370{
bellard8df1cd02005-01-28 22:37:22 +00003371 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003372 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003373 hwaddr l = 4;
3374 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003375 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003376 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003377 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003378
Paolo Bonzini41063e12015-03-18 14:21:43 +01003379 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003380 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003381 true);
3382 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003383 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003384
Peter Maydell50013112015-04-26 16:49:24 +01003385 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003386 } else {
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003387 addr1 += memory_region_get_ram_addr(mr);
Gonglei3655cb92016-02-20 10:35:20 +08003388 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003389 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003390
Paolo Bonzini845b6212015-03-23 11:45:53 +01003391 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3392 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003393 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003394 r = MEMTX_OK;
3395 }
3396 if (result) {
3397 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003398 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003399 if (release_lock) {
3400 qemu_mutex_unlock_iothread();
3401 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003402 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003403}
3404
Peter Maydell50013112015-04-26 16:49:24 +01003405void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3406{
3407 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3408}
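/*
 * Editor's sketch (not part of the original file): the "track modified
 * PTEs" use mentioned above.  A page-table walker that sets accessed or
 * dirty bits in a guest PTE uses the _notdirty store so its own
 * bookkeeping write neither dirties the page nor invalidates translated
 * code.  PTE_ACCESSED, PTE_DIRTY and "pte_set_flags" are hypothetical.
 */
#define PTE_ACCESSED 0x20
#define PTE_DIRTY    0x40

static void pte_set_flags(AddressSpace *as, hwaddr pte_addr, uint32_t flags)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    if ((pte & flags) != flags) {
        stl_phys_notdirty(as, pte_addr, pte | flags);
    }
}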
3409
bellard8df1cd02005-01-28 22:37:22 +00003410/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003411static inline void address_space_stl_internal(AddressSpace *as,
3412 hwaddr addr, uint32_t val,
3413 MemTxAttrs attrs,
3414 MemTxResult *result,
3415 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003416{
bellard8df1cd02005-01-28 22:37:22 +00003417 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003418 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003419 hwaddr l = 4;
3420 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003421 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003422 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003423
Paolo Bonzini41063e12015-03-18 14:21:43 +01003424 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003425 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003426 true);
3427 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003428 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003429
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003430#if defined(TARGET_WORDS_BIGENDIAN)
3431 if (endian == DEVICE_LITTLE_ENDIAN) {
3432 val = bswap32(val);
3433 }
3434#else
3435 if (endian == DEVICE_BIG_ENDIAN) {
3436 val = bswap32(val);
3437 }
3438#endif
Peter Maydell50013112015-04-26 16:49:24 +01003439 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003440 } else {
bellard8df1cd02005-01-28 22:37:22 +00003441 /* RAM case */
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003442 addr1 += memory_region_get_ram_addr(mr);
Gonglei3655cb92016-02-20 10:35:20 +08003443 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003444 switch (endian) {
3445 case DEVICE_LITTLE_ENDIAN:
3446 stl_le_p(ptr, val);
3447 break;
3448 case DEVICE_BIG_ENDIAN:
3449 stl_be_p(ptr, val);
3450 break;
3451 default:
3452 stl_p(ptr, val);
3453 break;
3454 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003455 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003456 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003457 }
Peter Maydell50013112015-04-26 16:49:24 +01003458 if (result) {
3459 *result = r;
3460 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003461 if (release_lock) {
3462 qemu_mutex_unlock_iothread();
3463 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003464 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003465}
3466
3467void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3468 MemTxAttrs attrs, MemTxResult *result)
3469{
3470 address_space_stl_internal(as, addr, val, attrs, result,
3471 DEVICE_NATIVE_ENDIAN);
3472}
3473
3474void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3475 MemTxAttrs attrs, MemTxResult *result)
3476{
3477 address_space_stl_internal(as, addr, val, attrs, result,
3478 DEVICE_LITTLE_ENDIAN);
3479}
3480
3481void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3482 MemTxAttrs attrs, MemTxResult *result)
3483{
3484 address_space_stl_internal(as, addr, val, attrs, result,
3485 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003486}
3487
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003488void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003489{
Peter Maydell50013112015-04-26 16:49:24 +01003490 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003491}
3492
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003493void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003494{
Peter Maydell50013112015-04-26 16:49:24 +01003495 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003496}
3497
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003498void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003499{
Peter Maydell50013112015-04-26 16:49:24 +01003500 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003501}
3502
bellardaab33092005-10-30 20:48:42 +00003503/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003504void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3505 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003506{
3507 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003508 MemTxResult r;
3509
3510 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3511 if (result) {
3512 *result = r;
3513 }
3514}
3515
3516void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3517{
3518 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003519}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        /* MMIO case: the access does not hit directly mapped RAM */
        release_lock |= prepare_mmio_access(mr);

        /* If the requested device endianness differs from the target's
         * natural order, byte-swap the value before dispatching it. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case: store directly into guest memory */
        addr1 += memory_region_get_ram_addr(mr);
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
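
/*
 * Worked example (illustrative only): after
 *
 *     stw_le_phys(as, addr, 0x1234);
 *
 * guest memory at addr holds the bytes 0x34 0x12, whereas
 *
 *     stw_be_phys(as, addr, 0x1234);
 *
 * leaves 0x12 0x34.  stw_phys() uses DEVICE_NATIVE_ENDIAN, so its byte
 * layout follows TARGET_WORDS_BIGENDIAN.
 */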

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
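
/*
 * Illustrative sketch (compiled out): the stq_*_phys wrappers above
 * discard the transaction result, so a caller that must detect bus
 * errors passes its own MemTxResult instead.
 */
#if 0
static bool example_store_u64_checked(AddressSpace *as, hwaddr addr,
                                      uint64_t val)
{
    MemTxResult res;

    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
#endif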

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
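
/*
 * Illustrative sketch (compiled out): roughly how a debugger stub might
 * read guest virtual memory through the function above.  The 64-byte
 * buffer size is arbitrary.
 */
#if 0
static void example_debug_read(CPUState *cpu, target_ulong vaddr)
{
    uint8_t buf[64];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        /* Some page in [vaddr, vaddr + 64) has no physical mapping. */
    }
}
#endif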

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
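
/*
 * Example (illustrative): target-independent code can derive the page
 * size and mask from this without including target headers:
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     size_t page_mask = page_size - 1;
 */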

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big-endian machine. Don't do this at home, kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
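
/*
 * Usage note (illustrative): a caller deciding the default endianness of
 * a legacy device would do something like
 *
 *     bool device_big_endian = target_words_bigendian();
 */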

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
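
/*
 * Usage note (illustrative): reading device registers can have side
 * effects, so code that walks guest-physical pages typically skips the
 * ones this function reports as I/O:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;
 *     }
 */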

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
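
/*
 * Illustrative sketch (compiled out): a callback matching the parameter
 * list func() is invoked with above, totalling the used length of every
 * RAM block.  A non-zero return from the callback stops the walk early.
 */
#if 0
static int example_sum_block(const char *idstr, void *host_addr,
                             ram_addr_t offset, ram_addr_t length,
                             void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;   /* keep iterating */
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}
#endif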
#endif