/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
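/* Editor's note: PHYS_MAP_NODE_NIL is the largest value that fits in the
 * 26-bit 'ptr' field above; it marks entries that do not yet point to a
 * node or section.
 */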

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
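/* Worked example (assuming a target with 4 KiB pages, TARGET_PAGE_BITS == 12):
 * the page number needs 64 - 12 = 52 bits, and with 9 bits resolved per level
 * this evaluates to ((64 - 12 - 1) / 9) + 1 = 6 levels.
 */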

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

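/* Editor's note: a subpage describes a single target page that is shared by
 * several MemoryRegionSections; sub_section[] maps each byte offset within
 * the page (SUBPAGE_IDX) to an index in the dispatch map's sections[] array.
 */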
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

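/* Editor's note: the helper below fills [*index, *index + *nb) pages of the
 * radix tree with 'leaf', allocating intermediate nodes on demand; entries at
 * the current level are turned into leaves whenever an aligned, step-sized
 * run covers them, otherwise it recurses into the child node.
 */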
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

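/* Editor's note: phys_page_find() walks the tree from the root, consuming
 * 'skip' levels at a time; addresses outside the registered range resolve to
 * PHYS_SECTION_UNASSIGNED.
 */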
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

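/* Editor's note: the translate loop below may cross several IOMMUs; each
 * iommu_ops->translate() call can redirect the access into another
 * AddressSpace, until a non-IOMMU MemoryRegion is reached (or
 * io_mem_unassigned on a permission failure).
 */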
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

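/* Editor's note: CPU index allocation differs by build. The softmmu build
 * hands out indexes from a bitmap so they can be recycled on unplug, while
 * the user-mode build simply counts the CPUs currently on the list.
 */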
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (i.e. the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

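/* Editor's note: the function below atomically tests and clears one client's
 * dirty bits over a range of guest RAM, resetting the TCG TLBs' dirty
 * tracking when anything was in fact dirty.
 */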
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

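/* Editor's note: the function below computes the iotlb value for one page.
 * For RAM it is the ram_addr ORed with a PHYS_SECTION_NOTDIRTY/ROM marker,
 * for MMIO it is the section index plus the offset; pages covered by a
 * watchpoint are redirected to PHYS_SECTION_WATCH so that accesses trap.
 */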
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

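/* Editor's note: sections that do not cover whole target pages are routed
 * through a subpage_t; register_subpage() creates (or reuses) the subpage for
 * the containing page and records the new section for the affected offsets.
 */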
Avi Kivityac1970f2012-10-03 16:22:53 +02001108static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001109{
1110 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001111 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001112 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001113 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001114 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001115 MemoryRegionSection subsection = {
1116 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001117 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001118 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001119 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001120
Avi Kivityf3705d52012-03-08 16:16:34 +02001121 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001122
Avi Kivityf3705d52012-03-08 16:16:34 +02001123 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001124 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001125 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001126 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001127 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001128 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001129 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001130 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001131 }
1132 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001133 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001134 subpage_register(subpage, start, end,
1135 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001136}
1137
1138
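/*
 * Map a page-aligned section whose size is a whole number of target pages
 * directly into the dispatch tree, one section index shared by all of its
 * pages.
 */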
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001139static void register_multipage(AddressSpaceDispatch *d,
1140 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001141{
Avi Kivitya8170e52012-10-23 12:30:10 +02001142 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001143 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001144 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1145 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001146
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001147 assert(num_pages);
1148 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001149}
1150
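/*
 * MemoryListener region_add callback: split the incoming section into an
 * unaligned head, a run of whole pages, and an unaligned tail, registering
 * each piece as a subpage or multipage mapping as appropriate.
 */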
Avi Kivityac1970f2012-10-03 16:22:53 +02001151static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001152{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001153 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001154 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001155 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001156 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001157
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001158 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1159 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1160 - now.offset_within_address_space;
1161
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001162 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001163 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001164 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001165 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001166 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001167 while (int128_ne(remain.size, now.size)) {
1168 remain.size = int128_sub(remain.size, now.size);
1169 remain.offset_within_address_space += int128_get64(now.size);
1170 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001171 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001172 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001173 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001174 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001175 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001176 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001177 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001178 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001179 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001180 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001181 }
1182}
1183
Sheng Yang62a27442010-01-26 19:21:16 +08001184void qemu_flush_coalesced_mmio_buffer(void)
1185{
1186 if (kvm_enabled())
1187 kvm_flush_coalesced_mmio_buffer();
1188}
1189
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001190void qemu_mutex_lock_ramlist(void)
1191{
1192 qemu_mutex_lock(&ram_list.mutex);
1193}
1194
1195void qemu_mutex_unlock_ramlist(void)
1196{
1197 qemu_mutex_unlock(&ram_list.mutex);
1198}
1199
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001200#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001201
1202#include <sys/vfs.h>
1203
1204#define HUGETLBFS_MAGIC 0x958458f6
1205
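/* Return the block size of the filesystem backing @path (for hugetlbfs this
 * is the huge page size), or 0 with @errp set if statfs() fails. */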
Hu Taofc7a5802014-09-09 13:28:01 +08001206static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001207{
1208 struct statfs fs;
1209 int ret;
1210
1211 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001212 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001213 } while (ret != 0 && errno == EINTR);
1214
1215 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001216 error_setg_errno(errp, errno, "failed to get page size of file %s",
1217 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001218 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001219 }
1220
Marcelo Tosattic9027602010-03-01 20:25:08 -03001221 return fs.f_bsize;
1222}
1223
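/*
 * Back @block with a file mapping: create a temporary file under @path when
 * it is a directory, otherwise open @path directly, grow the file to the
 * huge-page-rounded size and mmap it with the required alignment.  Returns
 * the mapped area, or NULL with @errp set on failure.
 */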
Alex Williamson04b16652010-07-02 11:13:17 -06001224static void *file_ram_alloc(RAMBlock *block,
1225 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001226 const char *path,
1227 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001228{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001229 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001230 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001231 char *sanitized_name;
1232 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001233 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001234 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001235 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001236 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001237
Hu Taofc7a5802014-09-09 13:28:01 +08001238 hpagesize = gethugepagesize(path, &local_err);
1239 if (local_err) {
1240 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001241 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001242 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001243 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001244
1245 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001246 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1247 "or larger than huge page size 0x%" PRIx64,
1248 memory, hpagesize);
1249 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001250 }
1251
1252 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001253 error_setg(errp,
1254 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001255 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001256 }
1257
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001258 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1259 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1260 sanitized_name = g_strdup(memory_region_name(block->mr));
1261 for (c = sanitized_name; *c != '\0'; c++) {
1262 if (*c == '/') {
1263 *c = '_';
1264 }
1265 }
1266
1267 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1268 sanitized_name);
1269 g_free(sanitized_name);
1270
1271 fd = mkstemp(filename);
1272 if (fd >= 0) {
1273 unlink(filename);
1274 }
1275 g_free(filename);
1276 } else {
1277 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001278 }
1279
Marcelo Tosattic9027602010-03-01 20:25:08 -03001280 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001281 error_setg_errno(errp, errno,
1282 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001283 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001284 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001285
Chen Hanxiao9284f312015-07-24 11:12:03 +08001286 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001287
1288 /*
1289 * ftruncate is not supported by hugetlbfs in older
1290 * hosts, so don't bother bailing out on errors.
1291 * If anything goes wrong with it under other filesystems,
1292 * mmap will fail.
1293 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001294 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001295 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001296 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001297
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001298 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001299 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001300 error_setg_errno(errp, errno,
1301 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001302 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001303 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001304 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001305
1306 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001307 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001308 }
1309
Alex Williamson04b16652010-07-02 11:13:17 -06001310 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001311 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001312
1313error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001314 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001315}
1316#endif
1317
Mike Day0dc3f442013-09-05 14:41:35 -04001318/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001319static ram_addr_t find_ram_offset(ram_addr_t size)
1320{
Alex Williamson04b16652010-07-02 11:13:17 -06001321 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001322 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001323
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001324 assert(size != 0); /* it would hand out same offset multiple times */
1325
Mike Day0dc3f442013-09-05 14:41:35 -04001326 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001327 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001328 }
Alex Williamson04b16652010-07-02 11:13:17 -06001329
Mike Day0dc3f442013-09-05 14:41:35 -04001330 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001331 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001332
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001333 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001334
Mike Day0dc3f442013-09-05 14:41:35 -04001335 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001336 if (next_block->offset >= end) {
1337 next = MIN(next, next_block->offset);
1338 }
1339 }
1340 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001341 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001342 mingap = next - end;
1343 }
1344 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001345
1346 if (offset == RAM_ADDR_MAX) {
1347 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1348 (uint64_t)size);
1349 abort();
1350 }
1351
Alex Williamson04b16652010-07-02 11:13:17 -06001352 return offset;
1353}
1354
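/* Return one past the highest ram_addr_t offset covered by any RAMBlock. */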
Juan Quintela652d7ec2012-07-20 10:37:54 +02001355ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001356{
Alex Williamsond17b5282010-06-25 11:08:38 -06001357 RAMBlock *block;
1358 ram_addr_t last = 0;
1359
Mike Day0dc3f442013-09-05 14:41:35 -04001360 rcu_read_lock();
1361 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001362 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001363 }
Mike Day0dc3f442013-09-05 14:41:35 -04001364 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001365 return last;
1366}
1367
Jason Baronddb97f12012-08-02 15:44:16 -04001368static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1369{
1370 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001371
 1372 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001373 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001374 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1375 if (ret) {
1376 perror("qemu_madvise");
1377 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1378 "but dump_guest_core=off specified\n");
1379 }
1380 }
1381}
1382
Mike Day0dc3f442013-09-05 14:41:35 -04001383/* Called within an RCU critical section, or while the ramlist lock
1384 * is held.
1385 */
Hu Tao20cfe882014-04-02 15:13:26 +08001386static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001387{
Hu Tao20cfe882014-04-02 15:13:26 +08001388 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001389
Mike Day0dc3f442013-09-05 14:41:35 -04001390 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001391 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001392 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001393 }
1394 }
Hu Tao20cfe882014-04-02 15:13:26 +08001395
1396 return NULL;
1397}
1398
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001399const char *qemu_ram_get_idstr(RAMBlock *rb)
1400{
1401 return rb->idstr;
1402}
1403
Mike Dayae3a7042013-09-05 14:41:35 -04001404/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001405void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1406{
Mike Dayae3a7042013-09-05 14:41:35 -04001407 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001408
Mike Day0dc3f442013-09-05 14:41:35 -04001409 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001410 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001411 assert(new_block);
1412 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001413
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001414 if (dev) {
1415 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001416 if (id) {
1417 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001418 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001419 }
1420 }
1421 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1422
Mike Day0dc3f442013-09-05 14:41:35 -04001423 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001424 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001425 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1426 new_block->idstr);
1427 abort();
1428 }
1429 }
Mike Day0dc3f442013-09-05 14:41:35 -04001430 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001431}
1432
Mike Dayae3a7042013-09-05 14:41:35 -04001433/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001434void qemu_ram_unset_idstr(ram_addr_t addr)
1435{
Mike Dayae3a7042013-09-05 14:41:35 -04001436 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001437
Mike Dayae3a7042013-09-05 14:41:35 -04001438 /* FIXME: arch_init.c assumes that this is not called throughout
1439 * migration. Ignore the problem since hot-unplug during migration
1440 * does not work anyway.
1441 */
1442
Mike Day0dc3f442013-09-05 14:41:35 -04001443 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001444 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001445 if (block) {
1446 memset(block->idstr, 0, sizeof(block->idstr));
1447 }
Mike Day0dc3f442013-09-05 14:41:35 -04001448 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001449}
1450
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001451static int memory_try_enable_merging(void *addr, size_t len)
1452{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001453 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001454 /* disabled by the user */
1455 return 0;
1456 }
1457
1458 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1459}
1460
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001461/* Only legal before the guest might have detected the memory size: e.g. on
1462 * incoming migration, or right after reset.
1463 *
 1464 * As the memory core doesn't know how memory is accessed, it is up to
 1465 * the resize callback to update device state and/or add assertions to detect
1466 * misuse, if necessary.
1467 */
1468int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1469{
1470 RAMBlock *block = find_ram_block(base);
1471
1472 assert(block);
1473
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001474 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001475
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001476 if (block->used_length == newsize) {
1477 return 0;
1478 }
1479
1480 if (!(block->flags & RAM_RESIZEABLE)) {
1481 error_setg_errno(errp, EINVAL,
1482 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1483 " in != 0x" RAM_ADDR_FMT, block->idstr,
1484 newsize, block->used_length);
1485 return -EINVAL;
1486 }
1487
1488 if (block->max_length < newsize) {
1489 error_setg_errno(errp, EINVAL,
1490 "Length too large: %s: 0x" RAM_ADDR_FMT
1491 " > 0x" RAM_ADDR_FMT, block->idstr,
1492 newsize, block->max_length);
1493 return -EINVAL;
1494 }
1495
1496 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1497 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001498 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1499 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001500 memory_region_set_size(block->mr, newsize);
1501 if (block->resized) {
1502 block->resized(block->idstr, newsize, block->host);
1503 }
1504 return 0;
1505}
1506
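/*
 * Allocate host memory for @new_block if it has none yet, pick an offset in
 * the ram_addr_t space, insert the block into ram_list (kept sorted from
 * biggest to smallest), extend the dirty bitmaps and mark the new range
 * dirty.  Returns the block's offset, or -1 with @errp set on failure.
 */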
Hu Taoef701d72014-09-09 13:27:54 +08001507static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001508{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001509 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001510 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001511 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001512 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001513
1514 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001515
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001516 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001517 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001518
1519 if (!new_block->host) {
1520 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001521 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001522 new_block->mr, &err);
1523 if (err) {
1524 error_propagate(errp, err);
1525 qemu_mutex_unlock_ramlist();
1526 return -1;
1527 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001528 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001529 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001530 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001531 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001532 error_setg_errno(errp, errno,
1533 "cannot set up guest memory '%s'",
1534 memory_region_name(new_block->mr));
1535 qemu_mutex_unlock_ramlist();
1536 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001537 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001538 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001539 }
1540 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001541
Li Zhijiandd631692015-07-02 20:18:06 +08001542 new_ram_size = MAX(old_ram_size,
1543 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1544 if (new_ram_size > old_ram_size) {
1545 migration_bitmap_extend(old_ram_size, new_ram_size);
1546 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001547 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1548 * QLIST (which has an RCU-friendly variant) does not have insertion at
1549 * tail, so save the last element in last_block.
1550 */
Mike Day0dc3f442013-09-05 14:41:35 -04001551 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001552 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001553 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001554 break;
1555 }
1556 }
1557 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001558 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001559 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001560 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001561 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001562 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001563 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001564 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001565
Mike Day0dc3f442013-09-05 14:41:35 -04001566 /* Write list before version */
1567 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001568 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001569 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001570
Juan Quintela2152f5c2013-10-08 13:52:02 +02001571 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1572
1573 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001574 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001575
1576 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001577 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1578 ram_list.dirty_memory[i] =
1579 bitmap_zero_extend(ram_list.dirty_memory[i],
1580 old_ram_size, new_ram_size);
1581 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001582 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001583 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001584 new_block->used_length,
1585 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001586
Paolo Bonzinia904c912015-01-21 16:18:35 +01001587 if (new_block->host) {
1588 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1589 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1590 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1591 if (kvm_enabled()) {
1592 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1593 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001594 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001595
1596 return new_block->offset;
1597}
1598
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001599#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001600ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001601 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001602 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001603{
1604 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001605 ram_addr_t addr;
1606 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001607
1608 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001609 error_setg(errp, "-mem-path not supported with Xen");
1610 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001611 }
1612
1613 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1614 /*
1615 * file_ram_alloc() needs to allocate just like
1616 * phys_mem_alloc, but we haven't bothered to provide
1617 * a hook there.
1618 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001619 error_setg(errp,
1620 "-mem-path not supported with this accelerator");
1621 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001622 }
1623
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001624 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001625 new_block = g_malloc0(sizeof(*new_block));
1626 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001627 new_block->used_length = size;
1628 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001629 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001630 new_block->host = file_ram_alloc(new_block, size,
1631 mem_path, errp);
1632 if (!new_block->host) {
1633 g_free(new_block);
1634 return -1;
1635 }
1636
Hu Taoef701d72014-09-09 13:27:54 +08001637 addr = ram_block_add(new_block, &local_err);
1638 if (local_err) {
1639 g_free(new_block);
1640 error_propagate(errp, local_err);
1641 return -1;
1642 }
1643 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001644}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001645#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001646
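/*
 * Common backend for qemu_ram_alloc(), qemu_ram_alloc_from_ptr() and
 * qemu_ram_alloc_resizeable(): fill in the RAMBlock descriptor and hand it
 * to ram_block_add().
 */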
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001647static
1648ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1649 void (*resized)(const char*,
1650 uint64_t length,
1651 void *host),
1652 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001653 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001654{
1655 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001656 ram_addr_t addr;
1657 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001658
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001659 size = HOST_PAGE_ALIGN(size);
1660 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001661 new_block = g_malloc0(sizeof(*new_block));
1662 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001663 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001664 new_block->used_length = size;
1665 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001666 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001667 new_block->fd = -1;
1668 new_block->host = host;
1669 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001670 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001671 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001672 if (resizeable) {
1673 new_block->flags |= RAM_RESIZEABLE;
1674 }
Hu Taoef701d72014-09-09 13:27:54 +08001675 addr = ram_block_add(new_block, &local_err);
1676 if (local_err) {
1677 g_free(new_block);
1678 error_propagate(errp, local_err);
1679 return -1;
1680 }
1681 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001682}
1683
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001684ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1685 MemoryRegion *mr, Error **errp)
1686{
1687 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1688}
1689
Hu Taoef701d72014-09-09 13:27:54 +08001690ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001691{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001692 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1693}
1694
1695ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1696 void (*resized)(const char*,
1697 uint64_t length,
1698 void *host),
1699 MemoryRegion *mr, Error **errp)
1700{
1701 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001702}
bellarde9a1ab12007-02-08 23:08:38 +00001703
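/* Reclaim a RAMBlock once no RCU reader can still see it: unmap or free its
 * backing storage according to how it was allocated, then free the block. */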
Paolo Bonzini43771532013-09-09 17:58:40 +02001704static void reclaim_ramblock(RAMBlock *block)
1705{
1706 if (block->flags & RAM_PREALLOC) {
1707 ;
1708 } else if (xen_enabled()) {
1709 xen_invalidate_map_cache_entry(block->host);
1710#ifndef _WIN32
1711 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001712 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001713 close(block->fd);
1714#endif
1715 } else {
1716 qemu_anon_ram_free(block->host, block->max_length);
1717 }
1718 g_free(block);
1719}
1720
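/* Unlink the block at @addr from ram_list under the ramlist lock and schedule
 * its storage for release after the current RCU grace period. */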
Anthony Liguoric227f092009-10-01 16:12:16 -05001721void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001722{
Alex Williamson04b16652010-07-02 11:13:17 -06001723 RAMBlock *block;
1724
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001725 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001726 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001727 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001728 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001729 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001730 /* Write list before version */
1731 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001732 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001733 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001734 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001735 }
1736 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001737 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001738}
1739
Huang Yingcd19cfa2011-03-02 08:56:19 +01001740#ifndef _WIN32
1741void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1742{
1743 RAMBlock *block;
1744 ram_addr_t offset;
1745 int flags;
1746 void *area, *vaddr;
1747
Mike Day0dc3f442013-09-05 14:41:35 -04001748 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001749 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001750 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001751 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001752 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001753 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001754 } else if (xen_enabled()) {
1755 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001756 } else {
1757 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001758 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001759 flags |= (block->flags & RAM_SHARED ?
1760 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001761 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1762 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001763 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001764 /*
1765 * Remap needs to match alloc. Accelerators that
1766 * set phys_mem_alloc never remap. If they did,
1767 * we'd need a remap hook here.
1768 */
1769 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1770
Huang Yingcd19cfa2011-03-02 08:56:19 +01001771 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1772 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1773 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001774 }
1775 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001776 fprintf(stderr, "Could not remap addr: "
1777 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001778 length, addr);
1779 exit(1);
1780 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001781 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001782 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001783 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001784 }
1785 }
1786}
1787#endif /* !_WIN32 */
1788
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001789int qemu_get_ram_fd(ram_addr_t addr)
1790{
Mike Dayae3a7042013-09-05 14:41:35 -04001791 RAMBlock *block;
1792 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001793
Mike Day0dc3f442013-09-05 14:41:35 -04001794 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001795 block = qemu_get_ram_block(addr);
1796 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001797 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001798 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001799}
1800
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001801void qemu_set_ram_fd(ram_addr_t addr, int fd)
1802{
1803 RAMBlock *block;
1804
1805 rcu_read_lock();
1806 block = qemu_get_ram_block(addr);
1807 block->fd = fd;
1808 rcu_read_unlock();
1809}
1810
Damjan Marion3fd74b82014-06-26 23:01:32 +02001811void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1812{
Mike Dayae3a7042013-09-05 14:41:35 -04001813 RAMBlock *block;
1814 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001815
Mike Day0dc3f442013-09-05 14:41:35 -04001816 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001817 block = qemu_get_ram_block(addr);
1818 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001819 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001820 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001821}
1822
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001823/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001824 * This should not be used for general purpose DMA. Use address_space_map
1825 * or address_space_rw instead. For local memory (e.g. video ram) that the
1826 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001827 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001828 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001829 */
1830void *qemu_get_ram_ptr(ram_addr_t addr)
1831{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001832 RAMBlock *block = qemu_get_ram_block(addr);
Mike Dayae3a7042013-09-05 14:41:35 -04001833
1834 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001835 /* We need to check if the requested address is in the RAM
1836 * because we don't want to map the entire memory in QEMU.
1837 * In that case just map until the end of the page.
1838 */
1839 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001840 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001841 }
Mike Dayae3a7042013-09-05 14:41:35 -04001842
1843 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001844 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001845 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001846}
1847
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001848/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001849 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001850 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001851 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001852 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001853static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001854{
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001855 RAMBlock *block;
1856 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001857 if (*size == 0) {
1858 return NULL;
1859 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001860
1861 block = qemu_get_ram_block(addr);
1862 offset_inside_block = addr - block->offset;
1863 *size = MIN(*size, block->max_length - offset_inside_block);
1864
1865 if (xen_enabled() && block->host == NULL) {
1866 /* We need to check if the requested address is in the RAM
1867 * because we don't want to map the entire memory in QEMU.
1868 * In that case just map the requested area.
1869 */
1870 if (block->offset == 0) {
1871 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001872 }
1873
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001874 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001875 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001876
1877 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001878}
1879
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001880/*
1881 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1882 * in that RAMBlock.
1883 *
1884 * ptr: Host pointer to look up
1885 * round_offset: If true round the result offset down to a page boundary
1886 * *ram_addr: set to result ram_addr
1887 * *offset: set to result offset within the RAMBlock
1888 *
1889 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001890 *
1891 * By the time this function returns, the returned pointer is not protected
1892 * by RCU anymore. If the caller is not within an RCU critical section and
1893 * does not hold the iothread lock, it must have other means of protecting the
1894 * pointer, such as a reference to the region that includes the incoming
1895 * ram_addr_t.
1896 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001897RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1898 ram_addr_t *ram_addr,
1899 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001900{
pbrook94a6b542009-04-11 17:15:54 +00001901 RAMBlock *block;
1902 uint8_t *host = ptr;
1903
Jan Kiszka868bb332011-06-21 22:59:09 +02001904 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001905 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001906 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001907 block = qemu_get_ram_block(*ram_addr);
1908 if (block) {
1909 *offset = (host - block->host);
1910 }
Mike Day0dc3f442013-09-05 14:41:35 -04001911 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001912 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001913 }
1914
Mike Day0dc3f442013-09-05 14:41:35 -04001915 rcu_read_lock();
1916 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001917 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001918 goto found;
1919 }
1920
Mike Day0dc3f442013-09-05 14:41:35 -04001921 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001922 /* This case happens when the block is not mapped. */
1923 if (block->host == NULL) {
1924 continue;
1925 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001926 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001927 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001928 }
pbrook94a6b542009-04-11 17:15:54 +00001929 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001930
Mike Day0dc3f442013-09-05 14:41:35 -04001931 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001932 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001933
1934found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001935 *offset = (host - block->host);
1936 if (round_offset) {
1937 *offset &= TARGET_PAGE_MASK;
1938 }
1939 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001940 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001941 return block;
1942}
1943
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001944/*
1945 * Finds the named RAMBlock
1946 *
1947 * name: The name of RAMBlock to find
1948 *
1949 * Returns: RAMBlock (or NULL if not found)
1950 */
1951RAMBlock *qemu_ram_block_by_name(const char *name)
1952{
1953 RAMBlock *block;
1954
1955 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1956 if (!strcmp(name, block->idstr)) {
1957 return block;
1958 }
1959 }
1960
1961 return NULL;
1962}
1963
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001964/* Some of the softmmu routines need to translate from a host pointer
1965 (typically a TLB entry) back to a ram offset. */
1966MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1967{
1968 RAMBlock *block;
1969 ram_addr_t offset; /* Not used */
1970
1971 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1972
1973 if (!block) {
1974 return NULL;
1975 }
1976
1977 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001978}
Alex Williamsonf471a172010-06-11 11:11:42 -06001979
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001980/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001981static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001982 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001983{
Juan Quintela52159192013-10-08 12:44:04 +02001984 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001985 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001986 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001987 switch (size) {
1988 case 1:
1989 stb_p(qemu_get_ram_ptr(ram_addr), val);
1990 break;
1991 case 2:
1992 stw_p(qemu_get_ram_ptr(ram_addr), val);
1993 break;
1994 case 4:
1995 stl_p(qemu_get_ram_ptr(ram_addr), val);
1996 break;
1997 default:
1998 abort();
1999 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002000 /* Set both VGA and migration bits for simplicity and to remove
2001 * the notdirty callback faster.
2002 */
2003 cpu_physical_memory_set_dirty_range(ram_addr, size,
2004 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002005 /* we remove the notdirty callback only if the code has been
2006 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002007 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002008 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002009 }
bellard1ccde1c2004-02-06 19:46:14 +00002010}
2011
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002012static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2013 unsigned size, bool is_write)
2014{
2015 return is_write;
2016}
2017
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002018static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002019 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002020 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002021 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002022};
2023
pbrook0f459d12008-06-09 00:20:13 +00002024/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002025static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002026{
Andreas Färber93afead2013-08-26 03:41:01 +02002027 CPUState *cpu = current_cpu;
2028 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002029 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002030 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002031 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002032 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002033
Andreas Färberff4700b2013-08-26 18:23:18 +02002034 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002035 /* We re-entered the check after replacing the TB. Now raise
 2036 * the debug interrupt so that it will trigger after the
2037 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002038 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002039 return;
2040 }
Andreas Färber93afead2013-08-26 03:41:01 +02002041 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002042 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002043 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2044 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002045 if (flags == BP_MEM_READ) {
2046 wp->flags |= BP_WATCHPOINT_HIT_READ;
2047 } else {
2048 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2049 }
2050 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002051 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002052 if (!cpu->watchpoint_hit) {
2053 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002054 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002055 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002056 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002057 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002058 } else {
2059 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002060 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002061 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002062 }
aliguori06d55cc2008-11-18 20:24:06 +00002063 }
aliguori6e140f22008-11-18 20:37:55 +00002064 } else {
2065 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002066 }
2067 }
2068}
2069
pbrook6658ffb2007-03-16 23:58:11 +00002070/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2071 so these check for a hit then pass through to the normal out-of-line
2072 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002073static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2074 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002075{
Peter Maydell66b9b432015-04-26 16:49:24 +01002076 MemTxResult res;
2077 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002078 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2079 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002080
Peter Maydell66b9b432015-04-26 16:49:24 +01002081 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002082 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002083 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002084 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002085 break;
2086 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002087 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002088 break;
2089 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002090 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002091 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002092 default: abort();
2093 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002094 *pdata = data;
2095 return res;
2096}
2097
2098static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2099 uint64_t val, unsigned size,
2100 MemTxAttrs attrs)
2101{
2102 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002103 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2104 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002105
2106 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2107 switch (size) {
2108 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002109 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002110 break;
2111 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002112 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002113 break;
2114 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002115 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002116 break;
2117 default: abort();
2118 }
2119 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002120}
2121
Avi Kivity1ec9b902012-01-02 12:47:48 +02002122static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002123 .read_with_attrs = watch_mem_read,
2124 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002125 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002126};
pbrook6658ffb2007-03-16 23:58:11 +00002127
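/*
 * Subpage accessors: accesses to a page that contains more than one
 * MemoryRegionSection are bounced through the owning address space at
 * subpage->base + addr, using a small buffer to convert between the access
 * size and the byte stream.
 */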
Peter Maydellf25a49e2015-04-26 16:49:24 +01002128static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2129 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002130{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002131 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002132 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002133 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002134
blueswir1db7b5422007-05-26 17:36:03 +00002135#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002136 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002137 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002138#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002139 res = address_space_read(subpage->as, addr + subpage->base,
2140 attrs, buf, len);
2141 if (res) {
2142 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002143 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002144 switch (len) {
2145 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002146 *data = ldub_p(buf);
2147 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002148 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002149 *data = lduw_p(buf);
2150 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002151 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002152 *data = ldl_p(buf);
2153 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002154 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002155 *data = ldq_p(buf);
2156 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002157 default:
2158 abort();
2159 }
blueswir1db7b5422007-05-26 17:36:03 +00002160}
2161
Peter Maydellf25a49e2015-04-26 16:49:24 +01002162static MemTxResult subpage_write(void *opaque, hwaddr addr,
2163 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002164{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002165 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002166 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002167
blueswir1db7b5422007-05-26 17:36:03 +00002168#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002169 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002170 " value %"PRIx64"\n",
2171 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002172#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002173 switch (len) {
2174 case 1:
2175 stb_p(buf, value);
2176 break;
2177 case 2:
2178 stw_p(buf, value);
2179 break;
2180 case 4:
2181 stl_p(buf, value);
2182 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002183 case 8:
2184 stq_p(buf, value);
2185 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002186 default:
2187 abort();
2188 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002189 return address_space_write(subpage->as, addr + subpage->base,
2190 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002191}
2192
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002193static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002194 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002195{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002196 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002197#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002198 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002199 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002200#endif
2201
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002202 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002203 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002204}
2205
Avi Kivity70c68e42012-01-02 12:32:48 +02002206static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002207 .read_with_attrs = subpage_read,
2208 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002209 .impl.min_access_size = 1,
2210 .impl.max_access_size = 8,
2211 .valid.min_access_size = 1,
2212 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002213 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002214 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002215};
2216
Anthony Liguoric227f092009-10-01 16:12:16 -05002217static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002218 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002219{
2220 int idx, eidx;
2221
2222 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2223 return -1;
2224 idx = SUBPAGE_IDX(start);
2225 eidx = SUBPAGE_IDX(end);
2226#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002227 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2228 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002229#endif
blueswir1db7b5422007-05-26 17:36:03 +00002230 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002231 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002232 }
2233
2234 return 0;
2235}
2236
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002237static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002238{
Anthony Liguoric227f092009-10-01 16:12:16 -05002239 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002240
Anthony Liguori7267c092011-08-20 22:09:37 -05002241 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002242
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002243 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002244 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002245 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002246 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002247 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002248#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002249 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2250 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002251#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002252 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002253
2254 return mmio;
2255}
2256
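/* Build a catch-all section covering the whole 2^64 range for @mr, used to
 * populate the fixed PHYS_SECTION_* slots of a new dispatch map. */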
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002257static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2258 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002259{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002260 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002261 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002262 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002263 .mr = mr,
2264 .offset_within_address_space = 0,
2265 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002266 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002267 };
2268
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002269 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002270}
2271
Peter Maydella54c87b2016-01-21 14:15:05 +00002272MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002273{
Peter Maydella54c87b2016-01-21 14:15:05 +00002274 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2275 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002276 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002277 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002278
2279 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002280}
2281
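/* Initialize the global io_mem_rom, io_mem_unassigned, io_mem_notdirty and
 * io_mem_watch regions that back the fixed dispatch sections. */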
Avi Kivitye9179ce2009-06-14 11:38:52 +03002282static void io_mem_init(void)
2283{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002284 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002285 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002286 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002287 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002288 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002289 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002290 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002291}
2292
Avi Kivityac1970f2012-10-03 16:22:53 +02002293static void mem_begin(MemoryListener *listener)
2294{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002295 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002296 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2297 uint16_t n;
2298
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002299 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002300 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002301 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002302 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002303 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002304 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002305 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002306 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002307
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002308 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002309 d->as = as;
2310 as->next_dispatch = d;
2311}
2312
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002313static void address_space_dispatch_free(AddressSpaceDispatch *d)
2314{
2315 phys_sections_free(&d->map);
2316 g_free(d);
2317}
2318
Paolo Bonzini00752702013-05-29 12:13:54 +02002319static void mem_commit(MemoryListener *listener)
2320{
2321 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002322 AddressSpaceDispatch *cur = as->dispatch;
2323 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002324
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002325 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002326
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002327 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002328 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002329 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002330 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002331}
2332
Avi Kivity1d711482012-10-02 18:54:45 +02002333static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002334{
Peter Maydell32857f42015-10-01 15:29:50 +01002335 CPUAddressSpace *cpuas;
2336 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002337
2338    /* Since each CPU stores RAM addresses in its TLB cache, we must
2339       reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002340 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2341 cpu_reloading_memory_map();
2342 /* The CPU and TLB are protected by the iothread lock.
2343 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2344 * may have split the RCU critical section.
2345 */
2346 d = atomic_rcu_read(&cpuas->as->dispatch);
2347 cpuas->memory_dispatch = d;
2348 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002349}
2350
Avi Kivityac1970f2012-10-03 16:22:53 +02002351void address_space_init_dispatch(AddressSpace *as)
2352{
Paolo Bonzini00752702013-05-29 12:13:54 +02002353 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002354 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002355 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002356 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002357 .region_add = mem_add,
2358 .region_nop = mem_add,
2359 .priority = 0,
2360 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002361 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002362}
2363
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002364void address_space_unregister(AddressSpace *as)
2365{
2366 memory_listener_unregister(&as->dispatch_listener);
2367}
2368
Avi Kivity83f3c252012-10-07 12:59:55 +02002369void address_space_destroy_dispatch(AddressSpace *as)
2370{
2371 AddressSpaceDispatch *d = as->dispatch;
2372
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002373 atomic_rcu_set(&as->dispatch, NULL);
2374 if (d) {
2375 call_rcu(d, address_space_dispatch_free, rcu);
2376 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002377}
2378
Avi Kivity62152b82011-07-26 14:26:14 +03002379static void memory_map_init(void)
2380{
Anthony Liguori7267c092011-08-20 22:09:37 -05002381 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002382
Paolo Bonzini57271d62013-11-07 17:14:37 +01002383 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002384 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002385
Anthony Liguori7267c092011-08-20 22:09:37 -05002386 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002387 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2388 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002389 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002390}
2391
2392MemoryRegion *get_system_memory(void)
2393{
2394 return system_memory;
2395}
2396
Avi Kivity309cb472011-08-08 16:09:03 +03002397MemoryRegion *get_system_io(void)
2398{
2399 return system_io;
2400}
2401
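/* Illustrative sketch (added for this write-up, not part of the original
 * file): how board or device code typically consumes the two roots returned
 * above.  get_system_memory() and get_system_io() hand back the container
 * regions that address_space_memory and address_space_io were initialized
 * with, so mapping a device is a matter of grafting a child region onto one
 * of them.  The ops table, base address and function name below are
 * hypothetical; memory_region_add_subregion() is the usual memory-API attach
 * call, declared in the memory headers rather than in this file.
 */
#if 0
static MemoryRegion my_dev_mmio;

static void my_dev_map_mmio(void *opaque, const MemoryRegionOps *my_dev_ops)
{
    /* A 4 KiB MMIO window backed by the device's ops/opaque pair. */
    memory_region_init_io(&my_dev_mmio, NULL, my_dev_ops, opaque,
                          "my-dev-mmio", 0x1000);
    /* Make it visible to CPU and DMA accesses through address_space_memory. */
    memory_region_add_subregion(get_system_memory(), 0xfe000000, &my_dev_mmio);
}
#endif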
pbrooke2eef172008-06-08 01:09:01 +00002402#endif /* !defined(CONFIG_USER_ONLY) */
2403
bellard13eb76e2004-01-24 15:23:36 +00002404/* physical memory access (slow version, mainly for debug) */
2405#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002406int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002407 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002408{
2409 int l, flags;
2410 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002411 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002412
2413 while (len > 0) {
2414 page = addr & TARGET_PAGE_MASK;
2415 l = (page + TARGET_PAGE_SIZE) - addr;
2416 if (l > len)
2417 l = len;
2418 flags = page_get_flags(page);
2419 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002420 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002421 if (is_write) {
2422 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002423 return -1;
bellard579a97f2007-11-11 14:26:47 +00002424 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002425 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002426 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002427 memcpy(p, buf, l);
2428 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002429 } else {
2430 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002431 return -1;
bellard579a97f2007-11-11 14:26:47 +00002432 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002433 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002434 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002435 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002436 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002437 }
2438 len -= l;
2439 buf += l;
2440 addr += l;
2441 }
Paul Brooka68fe892010-03-01 00:08:59 +00002442 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002443}
bellard8df1cd02005-01-28 22:37:22 +00002444
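/* Illustrative sketch (added for this write-up, not part of the original
 * file): the user-mode variant above is what gdbstub-style callers rely on.
 * It checks the PAGE_VALID/PAGE_READ/PAGE_WRITE flags for every page it
 * touches and returns -1 instead of faulting.  The wrapper name below is
 * hypothetical.
 */
#if 0
static bool debug_peek_u32(CPUState *cpu, target_ulong addr, uint32_t *value)
{
    uint8_t buf[4];

    /* is_write == 0: read guest memory into buf, honouring page protection. */
    if (cpu_memory_rw_debug(cpu, addr, buf, sizeof(buf), 0) != 0) {
        return false;               /* page unmapped or not readable */
    }
    *value = ldl_p(buf);            /* assemble the four bytes into a word */
    return true;
}
#endif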
bellard13eb76e2004-01-24 15:23:36 +00002445#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002446
Paolo Bonzini845b6212015-03-23 11:45:53 +01002447static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002448 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002449{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002450 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2451 /* No early return if dirty_log_mask is or becomes 0, because
2452 * cpu_physical_memory_set_dirty_range will still call
2453 * xen_modified_memory.
2454 */
2455 if (dirty_log_mask) {
2456 dirty_log_mask =
2457 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002458 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002459 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2460 tb_invalidate_phys_range(addr, addr + length);
2461 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2462 }
2463 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002464}
2465
Richard Henderson23326162013-07-08 14:55:59 -07002466static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002467{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002468 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002469
2470 /* Regions are assumed to support 1-4 byte accesses unless
2471 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002472 if (access_size_max == 0) {
2473 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002474 }
Richard Henderson23326162013-07-08 14:55:59 -07002475
2476 /* Bound the maximum access by the alignment of the address. */
2477 if (!mr->ops->impl.unaligned) {
2478 unsigned align_size_max = addr & -addr;
2479 if (align_size_max != 0 && align_size_max < access_size_max) {
2480 access_size_max = align_size_max;
2481 }
2482 }
2483
2484 /* Don't attempt accesses larger than the maximum. */
2485 if (l > access_size_max) {
2486 l = access_size_max;
2487 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002488 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002489
2490 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002491}
2492
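/* Worked example (added for this write-up, not part of the original file):
 * for a region whose ops leave valid.max_access_size at 0, the default
 * maximum is 4.  A request for l == 8 at addr == 0x1006 is first capped to 4
 * by the region maximum, then to 2 because addr & -addr == 2 (the address is
 * only 2-byte aligned and the region does not set impl.unaligned), and
 * pow2floor() leaves it at 2.  The access loops below then issue a 2-byte
 * dispatch and come back for the remainder.
 */
#if 0
static int memory_access_size_example(MemoryRegion *mr)
{
    return memory_access_size(mr, 8, 0x1006);   /* -> 2, as traced above */
}
#endif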
Jan Kiszka4840f102015-06-18 18:47:22 +02002493static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002494{
Jan Kiszka4840f102015-06-18 18:47:22 +02002495 bool unlocked = !qemu_mutex_iothread_locked();
2496 bool release_lock = false;
2497
2498 if (unlocked && mr->global_locking) {
2499 qemu_mutex_lock_iothread();
2500 unlocked = false;
2501 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002502 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002503 if (mr->flush_coalesced_mmio) {
2504 if (unlocked) {
2505 qemu_mutex_lock_iothread();
2506 }
2507 qemu_flush_coalesced_mmio_buffer();
2508 if (unlocked) {
2509 qemu_mutex_unlock_iothread();
2510 }
2511 }
2512
2513 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002514}
2515
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002516/* Called within RCU critical section. */
2517static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2518 MemTxAttrs attrs,
2519 const uint8_t *buf,
2520 int len, hwaddr addr1,
2521 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002522{
bellard13eb76e2004-01-24 15:23:36 +00002523 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002524 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002525 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002526 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002527
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002528 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002529 if (!memory_access_is_direct(mr, true)) {
2530 release_lock |= prepare_mmio_access(mr);
2531 l = memory_access_size(mr, l, addr1);
2532 /* XXX: could force current_cpu to NULL to avoid
2533 potential bugs */
2534 switch (l) {
2535 case 8:
2536 /* 64 bit write access */
2537 val = ldq_p(buf);
2538 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2539 attrs);
2540 break;
2541 case 4:
2542 /* 32 bit write access */
2543 val = ldl_p(buf);
2544 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2545 attrs);
2546 break;
2547 case 2:
2548 /* 16 bit write access */
2549 val = lduw_p(buf);
2550 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2551 attrs);
2552 break;
2553 case 1:
2554 /* 8 bit write access */
2555 val = ldub_p(buf);
2556 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2557 attrs);
2558 break;
2559 default:
2560 abort();
bellard13eb76e2004-01-24 15:23:36 +00002561 }
2562 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002563 addr1 += memory_region_get_ram_addr(mr);
2564 /* RAM case */
2565 ptr = qemu_get_ram_ptr(addr1);
2566 memcpy(ptr, buf, l);
2567 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002568 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002569
2570 if (release_lock) {
2571 qemu_mutex_unlock_iothread();
2572 release_lock = false;
2573 }
2574
bellard13eb76e2004-01-24 15:23:36 +00002575 len -= l;
2576 buf += l;
2577 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002578
2579 if (!len) {
2580 break;
2581 }
2582
2583 l = len;
2584 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002585 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002586
Peter Maydell3b643492015-04-26 16:49:23 +01002587 return result;
bellard13eb76e2004-01-24 15:23:36 +00002588}
bellard8df1cd02005-01-28 22:37:22 +00002589
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002590MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2591 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002592{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002593 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002594 hwaddr addr1;
2595 MemoryRegion *mr;
2596 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002597
2598 if (len > 0) {
2599 rcu_read_lock();
2600 l = len;
2601 mr = address_space_translate(as, addr, &addr1, &l, true);
2602 result = address_space_write_continue(as, addr, attrs, buf, len,
2603 addr1, l, mr);
2604 rcu_read_unlock();
2605 }
2606
2607 return result;
2608}
2609
2610/* Called within RCU critical section. */
2611MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2612 MemTxAttrs attrs, uint8_t *buf,
2613 int len, hwaddr addr1, hwaddr l,
2614 MemoryRegion *mr)
2615{
2616 uint8_t *ptr;
2617 uint64_t val;
2618 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002619 bool release_lock = false;
2620
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002621 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002622 if (!memory_access_is_direct(mr, false)) {
2623 /* I/O case */
2624 release_lock |= prepare_mmio_access(mr);
2625 l = memory_access_size(mr, l, addr1);
2626 switch (l) {
2627 case 8:
2628 /* 64 bit read access */
2629 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2630 attrs);
2631 stq_p(buf, val);
2632 break;
2633 case 4:
2634 /* 32 bit read access */
2635 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2636 attrs);
2637 stl_p(buf, val);
2638 break;
2639 case 2:
2640 /* 16 bit read access */
2641 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2642 attrs);
2643 stw_p(buf, val);
2644 break;
2645 case 1:
2646 /* 8 bit read access */
2647 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2648 attrs);
2649 stb_p(buf, val);
2650 break;
2651 default:
2652 abort();
2653 }
2654 } else {
2655 /* RAM case */
2656 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2657 memcpy(buf, ptr, l);
2658 }
2659
2660 if (release_lock) {
2661 qemu_mutex_unlock_iothread();
2662 release_lock = false;
2663 }
2664
2665 len -= l;
2666 buf += l;
2667 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002668
2669 if (!len) {
2670 break;
2671 }
2672
2673 l = len;
2674 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002675 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002676
2677 return result;
2678}
2679
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002680MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2681 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002682{
2683 hwaddr l;
2684 hwaddr addr1;
2685 MemoryRegion *mr;
2686 MemTxResult result = MEMTX_OK;
2687
2688 if (len > 0) {
2689 rcu_read_lock();
2690 l = len;
2691 mr = address_space_translate(as, addr, &addr1, &l, false);
2692 result = address_space_read_continue(as, addr, attrs, buf, len,
2693 addr1, l, mr);
2694 rcu_read_unlock();
2695 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002696
2697 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002698}
2699
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002700MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2701 uint8_t *buf, int len, bool is_write)
2702{
2703 if (is_write) {
2704 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2705 } else {
2706 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2707 }
2708}
Avi Kivityac1970f2012-10-03 16:22:53 +02002709
Avi Kivitya8170e52012-10-23 12:30:10 +02002710void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002711 int len, int is_write)
2712{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002713 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2714 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002715}
2716
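/* Illustrative sketch (added for this write-up, not part of the original
 * file): cpu_physical_memory_rw() is the legacy convenience wrapper -- it
 * always targets address_space_memory, passes MEMTXATTRS_UNSPECIFIED and
 * discards the MemTxResult.  Callers that care about bus errors use the
 * address_space_*() entry points directly, as in the hypothetical DMA helper
 * below.
 */
#if 0
static bool dma_copy_to_guest(AddressSpace *as, hwaddr dest,
                              const uint8_t *src, int len)
{
    MemTxResult res;

    res = address_space_write(as, dest, MEMTXATTRS_UNSPECIFIED, src, len);
    /* MEMTX_OK means every byte was accepted; anything else indicates a
     * decode or device error somewhere along the translated path. */
    return res == MEMTX_OK;
}
#endif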
Alexander Graf582b55a2013-12-11 14:17:44 +01002717enum write_rom_type {
2718 WRITE_DATA,
2719 FLUSH_CACHE,
2720};
2721
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002722static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002723 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002724{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002725 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002726 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002727 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002728 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002729
Paolo Bonzini41063e12015-03-18 14:21:43 +01002730 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002731 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002732 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002733 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002734
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002735 if (!(memory_region_is_ram(mr) ||
2736 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002737 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002738 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002739 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002740 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002741 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002742 switch (type) {
2743 case WRITE_DATA:
2744 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002745 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002746 break;
2747 case FLUSH_CACHE:
2748 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2749 break;
2750 }
bellardd0ecd2a2006-04-23 17:14:48 +00002751 }
2752 len -= l;
2753 buf += l;
2754 addr += l;
2755 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002756 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002757}
2758
Alexander Graf582b55a2013-12-11 14:17:44 +01002759/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002760void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002761 const uint8_t *buf, int len)
2762{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002763 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002764}
2765
2766void cpu_flush_icache_range(hwaddr start, int len)
2767{
2768 /*
2769 * This function should do the same thing as an icache flush that was
2770 * triggered from within the guest. For TCG we are always cache coherent,
2771 * so there is no need to flush anything. For KVM / Xen we need to flush
2772 * the host's instruction cache at least.
2773 */
2774 if (tcg_enabled()) {
2775 return;
2776 }
2777
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002778 cpu_physical_memory_write_rom_internal(&address_space_memory,
2779 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002780}
2781
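/* Illustrative sketch (added for this write-up, not part of the original
 * file): a firmware loader typically pairs the two helpers above.
 * cpu_physical_memory_write_rom() can store into ROM-backed regions that
 * ordinary writes would bounce off (and marks the range dirty), while
 * cpu_flush_icache_range() keeps the host instruction cache coherent when
 * running under KVM or Xen.  The base address below is hypothetical.
 */
#if 0
static void load_firmware_blob(const uint8_t *blob, int size)
{
    hwaddr rom_base = 0xfffc0000;    /* hypothetical reset-vector region */

    cpu_physical_memory_write_rom(&address_space_memory, rom_base, blob, size);
    cpu_flush_icache_range(rom_base, size);
}
#endif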
aliguori6d16c2f2009-01-22 16:59:11 +00002782typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002783 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002784 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002785 hwaddr addr;
2786 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002787 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002788} BounceBuffer;
2789
2790static BounceBuffer bounce;
2791
aliguoriba223c22009-01-22 16:59:16 +00002792typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002793 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002794 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002795} MapClient;
2796
Fam Zheng38e047b2015-03-16 17:03:35 +08002797QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002798static QLIST_HEAD(map_client_list, MapClient) map_client_list
2799 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002800
Fam Zhenge95205e2015-03-16 17:03:37 +08002801static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002802{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002803 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002804 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002805}
2806
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002807static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002808{
2809 MapClient *client;
2810
Blue Swirl72cf2d42009-09-12 07:36:22 +00002811 while (!QLIST_EMPTY(&map_client_list)) {
2812 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002813 qemu_bh_schedule(client->bh);
2814 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002815 }
2816}
2817
Fam Zhenge95205e2015-03-16 17:03:37 +08002818void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002819{
2820 MapClient *client = g_malloc(sizeof(*client));
2821
Fam Zheng38e047b2015-03-16 17:03:35 +08002822 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002823 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002824 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002825 if (!atomic_read(&bounce.in_use)) {
2826 cpu_notify_map_clients_locked();
2827 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002828 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002829}
2830
Fam Zheng38e047b2015-03-16 17:03:35 +08002831void cpu_exec_init_all(void)
2832{
2833 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002834 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002835 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002836 qemu_mutex_init(&map_client_list_lock);
2837}
2838
Fam Zhenge95205e2015-03-16 17:03:37 +08002839void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002840{
Fam Zhenge95205e2015-03-16 17:03:37 +08002841 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002842
Fam Zhenge95205e2015-03-16 17:03:37 +08002843 qemu_mutex_lock(&map_client_list_lock);
2844 QLIST_FOREACH(client, &map_client_list, link) {
2845 if (client->bh == bh) {
2846 cpu_unregister_map_client_do(client);
2847 break;
2848 }
2849 }
2850 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002851}
2852
2853static void cpu_notify_map_clients(void)
2854{
Fam Zheng38e047b2015-03-16 17:03:35 +08002855 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002856 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002857 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002858}
2859
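/* Illustrative sketch (added for this write-up, not part of the original
 * file): how a device model typically uses the map-client list.  When
 * address_space_map() fails because the single bounce buffer is busy, the
 * caller registers a bottom half and retries once the notification fires;
 * the client is removed automatically when it is scheduled.  qemu_bh_new()
 * is the usual main-loop API; the retry callback is hypothetical.
 */
#if 0
static void my_dma_retry_bh(void *opaque);    /* re-attempts the transfer */

static void my_dma_defer(void *opaque)
{
    QEMUBH *bh = qemu_bh_new(my_dma_retry_bh, opaque);

    /* Scheduled as soon as the bounce buffer (or a mapping) is released. */
    cpu_register_map_client(bh);
}
#endif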
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002860bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2861{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002862 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002863 hwaddr l, xlat;
2864
Paolo Bonzini41063e12015-03-18 14:21:43 +01002865 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002866 while (len > 0) {
2867 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002868 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2869 if (!memory_access_is_direct(mr, is_write)) {
2870 l = memory_access_size(mr, l, addr);
2871 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002872 return false;
2873 }
2874 }
2875
2876 len -= l;
2877 addr += l;
2878 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002879 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002880 return true;
2881}
2882
aliguori6d16c2f2009-01-22 16:59:11 +00002883/* Map a physical memory region into a host virtual address.
2884 * May map a subset of the requested range, given by and returned in *plen.
2885 * May return NULL if resources needed to perform the mapping are exhausted.
2886 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002887 * Use cpu_register_map_client() to know when retrying the map operation is
2888 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002889 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002890void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002891 hwaddr addr,
2892 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002893 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002894{
Avi Kivitya8170e52012-10-23 12:30:10 +02002895 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002896 hwaddr done = 0;
2897 hwaddr l, xlat, base;
2898 MemoryRegion *mr, *this_mr;
2899 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002900 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002901
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002902 if (len == 0) {
2903 return NULL;
2904 }
aliguori6d16c2f2009-01-22 16:59:11 +00002905
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002906 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002907 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002908 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002909
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002910 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002911 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002912 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002913 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002914 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002915 /* Avoid unbounded allocations */
2916 l = MIN(l, TARGET_PAGE_SIZE);
2917 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002918 bounce.addr = addr;
2919 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002920
2921 memory_region_ref(mr);
2922 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002923 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002924 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2925 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002926 }
aliguori6d16c2f2009-01-22 16:59:11 +00002927
Paolo Bonzini41063e12015-03-18 14:21:43 +01002928 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002929 *plen = l;
2930 return bounce.buffer;
2931 }
2932
2933 base = xlat;
2934 raddr = memory_region_get_ram_addr(mr);
2935
2936 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002937 len -= l;
2938 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002939 done += l;
2940 if (len == 0) {
2941 break;
2942 }
2943
2944 l = len;
2945 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2946 if (this_mr != mr || xlat != base + done) {
2947 break;
2948 }
aliguori6d16c2f2009-01-22 16:59:11 +00002949 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002950
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002951 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002952 *plen = done;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002953 ptr = qemu_ram_ptr_length(raddr + base, plen);
2954 rcu_read_unlock();
2955
2956 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002957}
2958
Avi Kivityac1970f2012-10-03 16:22:53 +02002959/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002960 * Will also mark the memory as dirty if is_write == 1. access_len gives
2961 * the amount of memory that was actually read or written by the caller.
2962 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002963void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2964 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002965{
2966 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002967 MemoryRegion *mr;
2968 ram_addr_t addr1;
2969
2970 mr = qemu_ram_addr_from_host(buffer, &addr1);
2971 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002972 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002973 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002974 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002975 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002976 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002977 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002978 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002979 return;
2980 }
2981 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002982 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2983 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002984 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002985 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002986 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002987 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002988 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002989 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002990}
bellardd0ecd2a2006-04-23 17:14:48 +00002991
Avi Kivitya8170e52012-10-23 12:30:10 +02002992void *cpu_physical_memory_map(hwaddr addr,
2993 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002994 int is_write)
2995{
2996 return address_space_map(&address_space_memory, addr, plen, is_write);
2997}
2998
Avi Kivitya8170e52012-10-23 12:30:10 +02002999void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3000 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003001{
3002 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3003}
3004
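/* Illustrative sketch (added for this write-up, not part of the original
 * file): the canonical map/use/unmap cycle for zero-copy DMA.  The mapping
 * may come back shorter than requested (a bounce buffer is at most
 * TARGET_PAGE_SIZE, and a direct mapping stops at the first discontiguous
 * MemoryRegion), so the caller loops on the returned length and always hands
 * the buffer back with the length actually consumed.  Names below are
 * hypothetical.
 */
#if 0
static bool dma_read_from_guest(AddressSpace *as, hwaddr addr, hwaddr len,
                                void (*consume)(const void *buf, hwaddr len))
{
    while (len > 0) {
        hwaddr plen = len;
        void *buf = address_space_map(as, addr, &plen, false /* read */);

        if (!buf) {
            /* Resources exhausted: retry later via cpu_register_map_client()
             * or fall back to a copying path such as address_space_read(). */
            return false;
        }
        consume(buf, plen);
        address_space_unmap(as, buf, plen, false, plen);

        addr += plen;
        len -= plen;
    }
    return true;
}
#endif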
bellard8df1cd02005-01-28 22:37:22 +00003005/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003006static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3007 MemTxAttrs attrs,
3008 MemTxResult *result,
3009 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003010{
bellard8df1cd02005-01-28 22:37:22 +00003011 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003012 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003013 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003014 hwaddr l = 4;
3015 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003016 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003017 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003018
Paolo Bonzini41063e12015-03-18 14:21:43 +01003019 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003020 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003021 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003022 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003023
bellard8df1cd02005-01-28 22:37:22 +00003024 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003025 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003026#if defined(TARGET_WORDS_BIGENDIAN)
3027 if (endian == DEVICE_LITTLE_ENDIAN) {
3028 val = bswap32(val);
3029 }
3030#else
3031 if (endian == DEVICE_BIG_ENDIAN) {
3032 val = bswap32(val);
3033 }
3034#endif
bellard8df1cd02005-01-28 22:37:22 +00003035 } else {
3036 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003037 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003038 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003039 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003040 switch (endian) {
3041 case DEVICE_LITTLE_ENDIAN:
3042 val = ldl_le_p(ptr);
3043 break;
3044 case DEVICE_BIG_ENDIAN:
3045 val = ldl_be_p(ptr);
3046 break;
3047 default:
3048 val = ldl_p(ptr);
3049 break;
3050 }
Peter Maydell50013112015-04-26 16:49:24 +01003051 r = MEMTX_OK;
3052 }
3053 if (result) {
3054 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003055 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003056 if (release_lock) {
3057 qemu_mutex_unlock_iothread();
3058 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003059 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003060 return val;
3061}
3062
Peter Maydell50013112015-04-26 16:49:24 +01003063uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3064 MemTxAttrs attrs, MemTxResult *result)
3065{
3066 return address_space_ldl_internal(as, addr, attrs, result,
3067 DEVICE_NATIVE_ENDIAN);
3068}
3069
3070uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3071 MemTxAttrs attrs, MemTxResult *result)
3072{
3073 return address_space_ldl_internal(as, addr, attrs, result,
3074 DEVICE_LITTLE_ENDIAN);
3075}
3076
3077uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3078 MemTxAttrs attrs, MemTxResult *result)
3079{
3080 return address_space_ldl_internal(as, addr, attrs, result,
3081 DEVICE_BIG_ENDIAN);
3082}
3083
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003084uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003085{
Peter Maydell50013112015-04-26 16:49:24 +01003086 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003087}
3088
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003089uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003090{
Peter Maydell50013112015-04-26 16:49:24 +01003091 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003092}
3093
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003094uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003095{
Peter Maydell50013112015-04-26 16:49:24 +01003096 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003097}
3098
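/* Illustrative sketch (added for this write-up, not part of the original
 * file): the _le/_be variants above absorb any host/target byte-order
 * difference, and the MemTxResult out-parameter reports the decode or device
 * errors that the bare ldl_phys() wrapper silently drops.  The register
 * offset and the error value are hypothetical.
 */
#if 0
static uint32_t read_le_status_reg(AddressSpace *as, hwaddr base)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, base + 0x10,
                                        MEMTXATTRS_UNSPECIFIED, &res);

    return res == MEMTX_OK ? val : 0xffffffff;   /* all-ones on bus error */
}
#endif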
bellard84b7b8e2005-11-28 21:19:04 +00003099/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003100static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3101 MemTxAttrs attrs,
3102 MemTxResult *result,
3103 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003104{
bellard84b7b8e2005-11-28 21:19:04 +00003105 uint8_t *ptr;
3106 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003107 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003108 hwaddr l = 8;
3109 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003110 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003111 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003112
Paolo Bonzini41063e12015-03-18 14:21:43 +01003113 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003114 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003115 false);
3116 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003117 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003118
bellard84b7b8e2005-11-28 21:19:04 +00003119 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003120 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003121#if defined(TARGET_WORDS_BIGENDIAN)
3122 if (endian == DEVICE_LITTLE_ENDIAN) {
3123 val = bswap64(val);
3124 }
3125#else
3126 if (endian == DEVICE_BIG_ENDIAN) {
3127 val = bswap64(val);
3128 }
3129#endif
bellard84b7b8e2005-11-28 21:19:04 +00003130 } else {
3131 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003132 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003133 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003134 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003135 switch (endian) {
3136 case DEVICE_LITTLE_ENDIAN:
3137 val = ldq_le_p(ptr);
3138 break;
3139 case DEVICE_BIG_ENDIAN:
3140 val = ldq_be_p(ptr);
3141 break;
3142 default:
3143 val = ldq_p(ptr);
3144 break;
3145 }
Peter Maydell50013112015-04-26 16:49:24 +01003146 r = MEMTX_OK;
3147 }
3148 if (result) {
3149 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003150 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003151 if (release_lock) {
3152 qemu_mutex_unlock_iothread();
3153 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003154 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003155 return val;
3156}
3157
Peter Maydell50013112015-04-26 16:49:24 +01003158uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3159 MemTxAttrs attrs, MemTxResult *result)
3160{
3161 return address_space_ldq_internal(as, addr, attrs, result,
3162 DEVICE_NATIVE_ENDIAN);
3163}
3164
3165uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3166 MemTxAttrs attrs, MemTxResult *result)
3167{
3168 return address_space_ldq_internal(as, addr, attrs, result,
3169 DEVICE_LITTLE_ENDIAN);
3170}
3171
3172uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3173 MemTxAttrs attrs, MemTxResult *result)
3174{
3175 return address_space_ldq_internal(as, addr, attrs, result,
3176 DEVICE_BIG_ENDIAN);
3177}
3178
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003179uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003180{
Peter Maydell50013112015-04-26 16:49:24 +01003181 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003182}
3183
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003184uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003185{
Peter Maydell50013112015-04-26 16:49:24 +01003186 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003187}
3188
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003189uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003190{
Peter Maydell50013112015-04-26 16:49:24 +01003191 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003192}
3193
bellardaab33092005-10-30 20:48:42 +00003194/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003195uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3196 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003197{
3198 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003199 MemTxResult r;
3200
3201 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3202 if (result) {
3203 *result = r;
3204 }
bellardaab33092005-10-30 20:48:42 +00003205 return val;
3206}
3207
Peter Maydell50013112015-04-26 16:49:24 +01003208uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3209{
3210 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3211}
3212
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003213/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003214static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3215 hwaddr addr,
3216 MemTxAttrs attrs,
3217 MemTxResult *result,
3218 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003219{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003220 uint8_t *ptr;
3221 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003222 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003223 hwaddr l = 2;
3224 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003225 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003226 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003227
Paolo Bonzini41063e12015-03-18 14:21:43 +01003228 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003229 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003230 false);
3231 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003232 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003233
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003234 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003235 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003236#if defined(TARGET_WORDS_BIGENDIAN)
3237 if (endian == DEVICE_LITTLE_ENDIAN) {
3238 val = bswap16(val);
3239 }
3240#else
3241 if (endian == DEVICE_BIG_ENDIAN) {
3242 val = bswap16(val);
3243 }
3244#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003245 } else {
3246 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003247 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003248 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003249 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003250 switch (endian) {
3251 case DEVICE_LITTLE_ENDIAN:
3252 val = lduw_le_p(ptr);
3253 break;
3254 case DEVICE_BIG_ENDIAN:
3255 val = lduw_be_p(ptr);
3256 break;
3257 default:
3258 val = lduw_p(ptr);
3259 break;
3260 }
Peter Maydell50013112015-04-26 16:49:24 +01003261 r = MEMTX_OK;
3262 }
3263 if (result) {
3264 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003265 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003266 if (release_lock) {
3267 qemu_mutex_unlock_iothread();
3268 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003269 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003270 return val;
bellardaab33092005-10-30 20:48:42 +00003271}
3272
Peter Maydell50013112015-04-26 16:49:24 +01003273uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3274 MemTxAttrs attrs, MemTxResult *result)
3275{
3276 return address_space_lduw_internal(as, addr, attrs, result,
3277 DEVICE_NATIVE_ENDIAN);
3278}
3279
3280uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3281 MemTxAttrs attrs, MemTxResult *result)
3282{
3283 return address_space_lduw_internal(as, addr, attrs, result,
3284 DEVICE_LITTLE_ENDIAN);
3285}
3286
3287uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3288 MemTxAttrs attrs, MemTxResult *result)
3289{
3290 return address_space_lduw_internal(as, addr, attrs, result,
3291 DEVICE_BIG_ENDIAN);
3292}
3293
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003294uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003295{
Peter Maydell50013112015-04-26 16:49:24 +01003296 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003297}
3298
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003299uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003300{
Peter Maydell50013112015-04-26 16:49:24 +01003301 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003302}
3303
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003304uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003305{
Peter Maydell50013112015-04-26 16:49:24 +01003306 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003307}
3308
bellard8df1cd02005-01-28 22:37:22 +00003309/* warning: addr must be aligned. The RAM page is not marked as dirty
3310 and the code inside is not invalidated. It is useful if the dirty
3311 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003312void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3313 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003314{
bellard8df1cd02005-01-28 22:37:22 +00003315 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003316 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003317 hwaddr l = 4;
3318 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003319 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003320 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003321 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003322
Paolo Bonzini41063e12015-03-18 14:21:43 +01003323 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003324 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003325 true);
3326 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003327 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003328
Peter Maydell50013112015-04-26 16:49:24 +01003329 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003330 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003331 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003332 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003333 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003334
Paolo Bonzini845b6212015-03-23 11:45:53 +01003335 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3336 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003337 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003338 r = MEMTX_OK;
3339 }
3340 if (result) {
3341 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003342 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003343 if (release_lock) {
3344 qemu_mutex_unlock_iothread();
3345 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003346 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003347}
3348
Peter Maydell50013112015-04-26 16:49:24 +01003349void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3350{
3351 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3352}
3353
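/* Illustrative sketch (added for this write-up, not part of the original
 * file): the _notdirty store is intended for target MMU emulation that
 * rewrites page table entries in guest RAM.  Skipping the DIRTY_MEMORY_CODE
 * bookkeeping (and hence TB invalidation) is safe there because a PTE word
 * never holds translated code.  The bit layout below (bit 5 = accessed,
 * bit 6 = dirty, x86-style) is purely illustrative.
 */
#if 0
static void pte_set_accessed_dirty(AddressSpace *as, hwaddr pte_addr,
                                   uint32_t pte, bool is_write)
{
    pte |= 0x20;                     /* accessed */
    if (is_write) {
        pte |= 0x40;                 /* dirty */
    }
    stl_phys_notdirty(as, pte_addr, pte);
}
#endif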
bellard8df1cd02005-01-28 22:37:22 +00003354/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003355static inline void address_space_stl_internal(AddressSpace *as,
3356 hwaddr addr, uint32_t val,
3357 MemTxAttrs attrs,
3358 MemTxResult *result,
3359 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003360{
bellard8df1cd02005-01-28 22:37:22 +00003361 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003362 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003363 hwaddr l = 4;
3364 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003365 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003366 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003367
Paolo Bonzini41063e12015-03-18 14:21:43 +01003368 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003369 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003370 true);
3371 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003372 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003373
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003374#if defined(TARGET_WORDS_BIGENDIAN)
3375 if (endian == DEVICE_LITTLE_ENDIAN) {
3376 val = bswap32(val);
3377 }
3378#else
3379 if (endian == DEVICE_BIG_ENDIAN) {
3380 val = bswap32(val);
3381 }
3382#endif
Peter Maydell50013112015-04-26 16:49:24 +01003383 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003384 } else {
bellard8df1cd02005-01-28 22:37:22 +00003385 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003386 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003387 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003388 switch (endian) {
3389 case DEVICE_LITTLE_ENDIAN:
3390 stl_le_p(ptr, val);
3391 break;
3392 case DEVICE_BIG_ENDIAN:
3393 stl_be_p(ptr, val);
3394 break;
3395 default:
3396 stl_p(ptr, val);
3397 break;
3398 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003399 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003400 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003401 }
Peter Maydell50013112015-04-26 16:49:24 +01003402 if (result) {
3403 *result = r;
3404 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003405 if (release_lock) {
3406 qemu_mutex_unlock_iothread();
3407 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003408 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003409}
3410
3411void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3412 MemTxAttrs attrs, MemTxResult *result)
3413{
3414 address_space_stl_internal(as, addr, val, attrs, result,
3415 DEVICE_NATIVE_ENDIAN);
3416}
3417
3418void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3419 MemTxAttrs attrs, MemTxResult *result)
3420{
3421 address_space_stl_internal(as, addr, val, attrs, result,
3422 DEVICE_LITTLE_ENDIAN);
3423}
3424
3425void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3426 MemTxAttrs attrs, MemTxResult *result)
3427{
3428 address_space_stl_internal(as, addr, val, attrs, result,
3429 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003430}
3431
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003432void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003433{
Peter Maydell50013112015-04-26 16:49:24 +01003434 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003435}
3436
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003437void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003438{
Peter Maydell50013112015-04-26 16:49:24 +01003439 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003440}
3441
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003442void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003443{
Peter Maydell50013112015-04-26 16:49:24 +01003444 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003445}
3446
bellardaab33092005-10-30 20:48:42 +00003447/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003448void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3449 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003450{
3451 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003452 MemTxResult r;
3453
3454 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3455 if (result) {
3456 *result = r;
3457 }
3458}
3459
3460void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3461{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
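
/*
 * Usage sketch (illustrative only, not part of the original file): this is
 * how a debugger-style caller such as the gdbstub reads guest memory
 * through a CPU's virtual address space.  'cpu' and 'vaddr' are
 * hypothetical locals.
 *
 *     uint32_t word;
 *     if (cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)&word,
 *                             sizeof(word), 0) < 0) {
 *         ... page is not mapped, handle the error ...
 *     }
 */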

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
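
/*
 * Usage sketch (illustrative only, not part of the original file):
 * target-independent migration code can derive the page size without
 * seeing TARGET_PAGE_BITS directly.  'ram_bytes' is a hypothetical
 * length supplied by the caller.
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     size_t pages = (ram_bytes + page_size - 1) / page_size;
 */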

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

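/*
 * Usage sketch (illustrative only, not part of the original file): legacy
 * virtio code consults this helper when a device has no byte order of its
 * own and must fall back to the target's default.
 *
 *     bool big_endian = target_words_bigendian();
 *     ... pick big- or little-endian guest accessors accordingly ...
 */
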
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

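/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * such as memory dump code skip guest-physical addresses that are backed
 * by MMIO rather than RAM or ROM.  'paddr' is a hypothetical hwaddr.
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         ... don't touch the device register, skip this page ...
 *     }
 */
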
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
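
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * sums the size of all RAM blocks by passing a callback whose parameters
 * match the func() invocation above.  Returning non-zero from the callback
 * stops the iteration early.
 *
 *     static int add_block_size(const char *idstr, void *host_addr,
 *                               ram_addr_t offset, ram_addr_t length,
 *                               void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(add_block_size, &total);
 */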
#endif