/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
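
/* Worked example (assuming a target with TARGET_PAGE_BITS == 12): a 64-bit
 * address space leaves 64 - 12 = 52 bits of page index to translate, so
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels of 512-entry tables.
 * Targets with larger pages need correspondingly fewer levels.
 */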

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
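
/* Illustrative note: phys_page_set_level() walks down from the top level,
 * allocating intermediate nodes on demand.  Whenever the remaining
 * [index, index + nb) range covers a whole, aligned subtree at the current
 * level (the (*index & (step - 1)) == 0 && *nb >= step test), the entry is
 * turned into a leaf pointing directly at `leaf` instead of recursing, so
 * large aligned regions are recorded without populating deeper levels.
 */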

/* Compact a non leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf.
         *
         * By design, we should have made this node a leaf to begin with, so
         * we should never reach here.  But since it's so simple to handle
         * this, let's do it just in case we change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
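
/* Illustrative note: compaction collapses chains of single-child nodes.
 * If only one page is mapped in a sparse address space, the lookup path
 * is a chain of nodes with one valid entry each; after phys_page_compact()
 * the parent entry points past the chain, with `skip` accumulating the
 * number of levels jumped, so phys_page_find() below takes a single step
 * instead of up to P_L2_LEVELS.
 */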

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
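
/* Illustrative note: the walk starts with i == P_L2_LEVELS and subtracts
 * lp.skip on every step, so a compacted entry consumes several levels'
 * worth of index bits at once.  Because skipped levels never compare their
 * index bits, the walk can land on a section that does not actually cover
 * addr; the final range_covers_byte() check catches this and falls back to
 * the unassigned section.
 */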

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
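
/* Illustrative note: each pass through the loop above resolves one IOMMU
 * hop: the IOTLB entry returned by the translate callback rewrites the
 * address and the loop re-dispatches into iotlb.target_as.  It terminates
 * on the first region without iommu_ops, or on an access the IOMMU forbids
 * (which yields io_mem_unassigned).  *plen is clamped to the IOTLB page at
 * every hop so the caller never crosses a translation boundary.
 */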

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory.  (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.)  The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
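
/* Worked example (hypothetical values): an access of len == 4 at
 * addr == ~(vaddr)0 - 3 ends exactly at the top of the address space.
 * The exclusive end addr + len would wrap to 0 and break a naive
 * wp->vaddr < addr + len comparison, but the inclusive end
 * addrend == ~(vaddr)0 stays representable, so the overlap test above
 * still works.
 */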

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
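
/* Worked example (assuming exec/ram_addr.h defines DIRTY_MEMORY_BLOCK_SIZE
 * as 256K * 8 bits, i.e. 2M pages of bitmap per block): the loop above
 * splits [page, end) so that each bitmap_test_and_clear_atomic() call
 * stays within one block: idx picks the block, offset is the bit position
 * inside it, and num never extends past the end of that block.
 */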

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
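
/* Illustrative note: because sections_nb stays below TARGET_PAGE_SIZE, a
 * section index fits entirely in the low bits of a page-aligned address.
 * memory_region_section_get_iotlb() above relies on this when it builds
 * entries such as (ram_addr & TARGET_PAGE_MASK) | PHYS_SECTION_ROM: the
 * page-aligned part and the section index can be separated again without
 * ambiguity.
 */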

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001192 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001193 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001194 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001195 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001196 }
1197}
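/* Worked example of the splitting above (illustration only, assuming
 * 4 KiB target pages): a section starting at address 0x1800 with size
 * 0x3c00 is registered in three pieces:
 *
 *   subpage   [0x1800, 0x2000)   unaligned head
 *   multipage [0x2000, 0x5000)   whole pages
 *   subpage   [0x5000, 0x5400)   unaligned tail
 *
 * A fully page-aligned section takes only the register_multipage() path.
 */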
1198
Sheng Yang62a27442010-01-26 19:21:16 +08001199void qemu_flush_coalesced_mmio_buffer(void)
1200{
1201 if (kvm_enabled())
1202 kvm_flush_coalesced_mmio_buffer();
1203}
1204
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001205void qemu_mutex_lock_ramlist(void)
1206{
1207 qemu_mutex_lock(&ram_list.mutex);
1208}
1209
1210void qemu_mutex_unlock_ramlist(void)
1211{
1212 qemu_mutex_unlock(&ram_list.mutex);
1213}
1214
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001215#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001216
1217#include <sys/vfs.h>
1218
1219#define HUGETLBFS_MAGIC 0x958458f6
1220
Hu Taofc7a5802014-09-09 13:28:01 +08001221static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001222{
1223 struct statfs fs;
1224 int ret;
1225
1226 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001227 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001228 } while (ret != 0 && errno == EINTR);
1229
1230 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001231 error_setg_errno(errp, errno, "failed to get page size of file %s",
1232 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001233 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001234 }
1235
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236 return fs.f_bsize;
1237}
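/* Note that this returns f_bsize for whatever filesystem backs the path
 * and never compares against HUGETLBFS_MAGIC, so a non-hugetlbfs path
 * silently yields the ordinary block size.  A stricter variant could look
 * like this (sketch only; the EINTR retry loop is omitted for brevity):
 */
#if 0
static long gethugepagesize_strict(const char *path, Error **errp)
{
    struct statfs fs;

    if (statfs(path, &fs) != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }
    if (fs.f_type != HUGETLBFS_MAGIC) {
        error_setg(errp, "%s is not backed by hugetlbfs", path);
        return 0;
    }
    return fs.f_bsize;
}
#endif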
1238
Alex Williamson04b16652010-07-02 11:13:17 -06001239static void *file_ram_alloc(RAMBlock *block,
1240 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001241 const char *path,
1242 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001243{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001244 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001245 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001246 char *sanitized_name;
1247 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001248 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001249 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001250 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001251 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001252
Hu Taofc7a5802014-09-09 13:28:01 +08001253 hpagesize = gethugepagesize(path, &local_err);
1254 if (local_err) {
1255 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001256 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001257 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001258 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001259
1260 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001261 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1262 "or larger than huge page size 0x%" PRIx64,
1263 memory, hpagesize);
1264 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001265 }
1266
1267 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001268 error_setg(errp,
1269 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001270 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001271 }
1272
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001273 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1274 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1275 sanitized_name = g_strdup(memory_region_name(block->mr));
1276 for (c = sanitized_name; *c != '\0'; c++) {
1277 if (*c == '/') {
1278 *c = '_';
1279 }
1280 }
1281
1282 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1283 sanitized_name);
1284 g_free(sanitized_name);
1285
1286 fd = mkstemp(filename);
1287 if (fd >= 0) {
1288 unlink(filename);
1289 }
1290 g_free(filename);
1291 } else {
1292 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001293 }
1294
Marcelo Tosattic9027602010-03-01 20:25:08 -03001295 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001296 error_setg_errno(errp, errno,
1297 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001298 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001299 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001300
Chen Hanxiao9284f312015-07-24 11:12:03 +08001301 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001302
1303 /*
1304 * ftruncate is not supported by hugetlbfs in older
1305 * hosts, so don't bother bailing out on errors.
1306 * If anything goes wrong with it under other filesystems,
1307 * mmap will fail.
1308 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001309 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001310 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001311 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001312
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001313 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001314 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001315 error_setg_errno(errp, errno,
1316 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001317 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001318 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001319 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001320
1321 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001322 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001323 }
1324
Alex Williamson04b16652010-07-02 11:13:17 -06001325 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001326 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001327
1328error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001329 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001330}
1331#endif
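/* How the hugetlbfs path above is typically exercised (the mount point
 * below is only an example):
 *
 *   # mount -t hugetlbfs hugetlbfs /dev/hugepages
 *   $ qemu-system-x86_64 -m 4096 -mem-path /dev/hugepages -mem-prealloc ...
 *
 * file_ram_alloc() then creates the backing file under the mount point and
 * maps it with qemu_ram_mmap(); -mem-prealloc makes os_mem_prealloc()
 * fault in every page up front.
 */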
1332
Mike Day0dc3f442013-09-05 14:41:35 -04001333/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001334static ram_addr_t find_ram_offset(ram_addr_t size)
1335{
Alex Williamson04b16652010-07-02 11:13:17 -06001336 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001337 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001338
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001339 assert(size != 0); /* it would hand out the same offset multiple times */
1340
Mike Day0dc3f442013-09-05 14:41:35 -04001341 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001342 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001343 }
Alex Williamson04b16652010-07-02 11:13:17 -06001344
Mike Day0dc3f442013-09-05 14:41:35 -04001345 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001346 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001347
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001348 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001349
Mike Day0dc3f442013-09-05 14:41:35 -04001350 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001351 if (next_block->offset >= end) {
1352 next = MIN(next, next_block->offset);
1353 }
1354 }
1355 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001356 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001357 mingap = next - end;
1358 }
1359 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001360
1361 if (offset == RAM_ADDR_MAX) {
1362 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1363 (uint64_t)size);
1364 abort();
1365 }
1366
Alex Williamson04b16652010-07-02 11:13:17 -06001367 return offset;
1368}
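/* Illustration of the best-fit search above: with blocks at
 * [0x00000000, 0x08000000) and [0x10000000, 0x18000000), a request for
 * 0x4000000 bytes sees two candidate gaps -- 0x8000000 bytes after the
 * first block and an effectively unbounded one after the second.  Both
 * fit, but the smaller gap wins, so the new block lands at offset
 * 0x8000000.
 */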
1369
Juan Quintela652d7ec2012-07-20 10:37:54 +02001370ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001371{
Alex Williamsond17b5282010-06-25 11:08:38 -06001372 RAMBlock *block;
1373 ram_addr_t last = 0;
1374
Mike Day0dc3f442013-09-05 14:41:35 -04001375 rcu_read_lock();
1376 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001377 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001378 }
Mike Day0dc3f442013-09-05 14:41:35 -04001379 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001380 return last;
1381}
1382
Jason Baronddb97f12012-08-02 15:44:16 -04001383static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1384{
1385 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001386
1387 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001388 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001389 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1390 if (ret) {
1391 perror("qemu_madvise");
1392 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1393 "but dump_guest_core=off specified\n");
1394 }
1395 }
1396}
1397
Mike Day0dc3f442013-09-05 14:41:35 -04001398/* Called within an RCU critical section, or while the ramlist lock
1399 * is held.
1400 */
Hu Tao20cfe882014-04-02 15:13:26 +08001401static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001402{
Hu Tao20cfe882014-04-02 15:13:26 +08001403 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001404
Mike Day0dc3f442013-09-05 14:41:35 -04001405 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001406 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001407 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001408 }
1409 }
Hu Tao20cfe882014-04-02 15:13:26 +08001410
1411 return NULL;
1412}
1413
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001414const char *qemu_ram_get_idstr(RAMBlock *rb)
1415{
1416 return rb->idstr;
1417}
1418
Mike Dayae3a7042013-09-05 14:41:35 -04001419/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001420void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1421{
Mike Dayae3a7042013-09-05 14:41:35 -04001422 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001423
Mike Day0dc3f442013-09-05 14:41:35 -04001424 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001425 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001426 assert(new_block);
1427 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001428
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001429 if (dev) {
1430 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001431 if (id) {
1432 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001433 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001434 }
1435 }
1436 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1437
Mike Day0dc3f442013-09-05 14:41:35 -04001438 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001439 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001440 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1441 new_block->idstr);
1442 abort();
1443 }
1444 }
Mike Day0dc3f442013-09-05 14:41:35 -04001445 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001446}
1447
Mike Dayae3a7042013-09-05 14:41:35 -04001448/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001449void qemu_ram_unset_idstr(ram_addr_t addr)
1450{
Mike Dayae3a7042013-09-05 14:41:35 -04001451 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001452
Mike Dayae3a7042013-09-05 14:41:35 -04001453 /* FIXME: arch_init.c assumes that this is not called during
1454 * migration. Ignore the problem since hot-unplug during migration
1455 * does not work anyway.
1456 */
1457
Mike Day0dc3f442013-09-05 14:41:35 -04001458 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001459 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001460 if (block) {
1461 memset(block->idstr, 0, sizeof(block->idstr));
1462 }
Mike Day0dc3f442013-09-05 14:41:35 -04001463 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001464}
1465
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001466static int memory_try_enable_merging(void *addr, size_t len)
1467{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001468 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001469 /* disabled by the user */
1470 return 0;
1471 }
1472
1473 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1474}
1475
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001476/* Only legal before the guest might have detected the memory size: e.g. on
1477 * incoming migration, or right after reset.
1478 *
1479 * As the memory core doesn't know how memory is accessed, it is up to the
1480 * resize callback to update device state and/or add assertions to detect
1481 * misuse, if necessary.
1482 */
1483int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1484{
1485 RAMBlock *block = find_ram_block(base);
1486
1487 assert(block);
1488
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001489 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001490
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001491 if (block->used_length == newsize) {
1492 return 0;
1493 }
1494
1495 if (!(block->flags & RAM_RESIZEABLE)) {
1496 error_setg_errno(errp, EINVAL,
1497 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1498 " in != 0x" RAM_ADDR_FMT, block->idstr,
1499 newsize, block->used_length);
1500 return -EINVAL;
1501 }
1502
1503 if (block->max_length < newsize) {
1504 error_setg_errno(errp, EINVAL,
1505 "Length too large: %s: 0x" RAM_ADDR_FMT
1506 " > 0x" RAM_ADDR_FMT, block->idstr,
1507 newsize, block->max_length);
1508 return -EINVAL;
1509 }
1510
1511 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1512 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001513 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1514 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001515 memory_region_set_size(block->mr, newsize);
1516 if (block->resized) {
1517 block->resized(block->idstr, newsize, block->host);
1518 }
1519 return 0;
1520}
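/* A minimal sketch of how a caller might grow a block allocated with the
 * RAM_RESIZEABLE flag (illustration only; the function name and size are
 * hypothetical):
 */
#if 0
static void example_grow_ram(ram_addr_t base, Error **errp)
{
    Error *err = NULL;

    /* Grows used_length; the new size must stay within max_length. */
    if (qemu_ram_resize(base, 512 * 1024 * 1024, &err) < 0) {
        error_propagate(errp, err);
    }
}
#endif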
1521
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001522/* Called with ram_list.mutex held */
1523static void dirty_memory_extend(ram_addr_t old_ram_size,
1524 ram_addr_t new_ram_size)
1525{
1526 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1527 DIRTY_MEMORY_BLOCK_SIZE);
1528 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1529 DIRTY_MEMORY_BLOCK_SIZE);
1530 int i;
1531
1532 /* Only need to extend if block count increased */
1533 if (new_num_blocks <= old_num_blocks) {
1534 return;
1535 }
1536
1537 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1538 DirtyMemoryBlocks *old_blocks;
1539 DirtyMemoryBlocks *new_blocks;
1540 int j;
1541
1542 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1543 new_blocks = g_malloc(sizeof(*new_blocks) +
1544 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1545
1546 if (old_num_blocks) {
1547 memcpy(new_blocks->blocks, old_blocks->blocks,
1548 old_num_blocks * sizeof(old_blocks->blocks[0]));
1549 }
1550
1551 for (j = old_num_blocks; j < new_num_blocks; j++) {
1552 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1553 }
1554
1555 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1556
1557 if (old_blocks) {
1558 g_free_rcu(old_blocks, rcu);
1559 }
1560 }
1561}
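/* The function above is a textbook RCU update: build the enlarged copy,
 * publish it with atomic_rcu_set(), and let g_free_rcu() reclaim the old
 * copy only after every reader has left its critical section.  The same
 * pattern in miniature (illustration only):
 *
 *   new = g_malloc(...);                  copy old state into new
 *   atomic_rcu_set(&shared_ptr, new);     readers now see the new copy
 *   g_free_rcu(old, rcu);                 old freed after a grace period
 */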
1562
Hu Taoef701d72014-09-09 13:27:54 +08001563static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001564{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001565 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001566 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001567 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001568 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001569
1570 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001571
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001572 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001573 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001574
1575 if (!new_block->host) {
1576 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001577 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001578 new_block->mr, &err);
1579 if (err) {
1580 error_propagate(errp, err);
1581 qemu_mutex_unlock_ramlist();
1582 return -1;
1583 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001584 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001585 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001586 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001587 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001588 error_setg_errno(errp, errno,
1589 "cannot set up guest memory '%s'",
1590 memory_region_name(new_block->mr));
1591 qemu_mutex_unlock_ramlist();
1592 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001593 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001594 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001595 }
1596 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001597
Li Zhijiandd631692015-07-02 20:18:06 +08001598 new_ram_size = MAX(old_ram_size,
1599 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1600 if (new_ram_size > old_ram_size) {
1601 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001602 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001603 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001604 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1605 * QLIST (which has an RCU-friendly variant) does not have insertion at
1606 * tail, so save the last element in last_block.
1607 */
Mike Day0dc3f442013-09-05 14:41:35 -04001608 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001609 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001610 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001611 break;
1612 }
1613 }
1614 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001615 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001616 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001617 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001618 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001619 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001620 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001621 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001622
Mike Day0dc3f442013-09-05 14:41:35 -04001623 /* Write list before version */
1624 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001625 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001626 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001627
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001628 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001629 new_block->used_length,
1630 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001631
Paolo Bonzinia904c912015-01-21 16:18:35 +01001632 if (new_block->host) {
1633 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1634 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1635 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1636 if (kvm_enabled()) {
1637 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1638 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001639 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001640
1641 return new_block->offset;
1642}
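/* Illustration of the ordering above: with existing blocks of max_length
 * 4G, 1G and 16M, a new 256M block is linked in between the 1G and 16M
 * entries.  Keeping the list sorted from biggest to smallest is a
 * heuristic: lookups walk the list in order, so the largest blocks, which
 * are the most likely to be hit, are visited first.
 */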
1643
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001644#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001645ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001646 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001647 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001648{
1649 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001650 ram_addr_t addr;
1651 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001652
1653 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001654 error_setg(errp, "-mem-path not supported with Xen");
1655 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001656 }
1657
1658 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1659 /*
1660 * file_ram_alloc() needs to allocate just like
1661 * phys_mem_alloc, but we haven't bothered to provide
1662 * a hook there.
1663 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001664 error_setg(errp,
1665 "-mem-path not supported with this accelerator");
1666 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001667 }
1668
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001669 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001670 new_block = g_malloc0(sizeof(*new_block));
1671 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001672 new_block->used_length = size;
1673 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001674 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001675 new_block->host = file_ram_alloc(new_block, size,
1676 mem_path, errp);
1677 if (!new_block->host) {
1678 g_free(new_block);
1679 return -1;
1680 }
1681
Hu Taoef701d72014-09-09 13:27:54 +08001682 addr = ram_block_add(new_block, &local_err);
1683 if (local_err) {
1684 g_free(new_block);
1685 error_propagate(errp, local_err);
1686 return -1;
1687 }
1688 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001689}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001690#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001691
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001692static
1693ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1694 void (*resized)(const char*,
1695 uint64_t length,
1696 void *host),
1697 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001698 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001699{
1700 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001701 ram_addr_t addr;
1702 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001703
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001704 size = HOST_PAGE_ALIGN(size);
1705 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001706 new_block = g_malloc0(sizeof(*new_block));
1707 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001708 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001709 new_block->used_length = size;
1710 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001711 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001712 new_block->fd = -1;
1713 new_block->host = host;
1714 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001715 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001716 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001717 if (resizeable) {
1718 new_block->flags |= RAM_RESIZEABLE;
1719 }
Hu Taoef701d72014-09-09 13:27:54 +08001720 addr = ram_block_add(new_block, &local_err);
1721 if (local_err) {
1722 g_free(new_block);
1723 error_propagate(errp, local_err);
1724 return -1;
1725 }
1726 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001727}
1728
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001729ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1730 MemoryRegion *mr, Error **errp)
1731{
1732 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1733}
1734
Hu Taoef701d72014-09-09 13:27:54 +08001735ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001736{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001737 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1738}
1739
1740ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1741 void (*resized)(const char*,
1742 uint64_t length,
1743 void *host),
1744 MemoryRegion *mr, Error **errp)
1745{
1746 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001747}
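/* A minimal sketch of allocating a resizeable block with the API above
 * (illustration only; the names and sizes are hypothetical):
 */
#if 0
static void example_resized(const char *id, uint64_t length, void *host)
{
    /* Device-specific reaction to the new size, e.g. updating firmware
     * tables that advertise the memory to the guest. */
}

static void example_alloc(MemoryRegion *mr, Error **errp)
{
    /* Reserve 1 GiB of address space but start with 128 MiB in use. */
    qemu_ram_alloc_resizeable(128 * 1024 * 1024, 1024 * 1024 * 1024,
                              example_resized, mr, errp);
}
#endif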
bellarde9a1ab12007-02-08 23:08:38 +00001748
Paolo Bonzini43771532013-09-09 17:58:40 +02001749static void reclaim_ramblock(RAMBlock *block)
1750{
1751 if (block->flags & RAM_PREALLOC) {
1752 ;
1753 } else if (xen_enabled()) {
1754 xen_invalidate_map_cache_entry(block->host);
1755#ifndef _WIN32
1756 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001757 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001758 close(block->fd);
1759#endif
1760 } else {
1761 qemu_anon_ram_free(block->host, block->max_length);
1762 }
1763 g_free(block);
1764}
1765
Anthony Liguoric227f092009-10-01 16:12:16 -05001766void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001767{
Alex Williamson04b16652010-07-02 11:13:17 -06001768 RAMBlock *block;
1769
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001770 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001771 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001772 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001773 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001774 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001775 /* Write list before version */
1776 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001777 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001778 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001779 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001780 }
1781 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001782 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001783}
1784
Huang Yingcd19cfa2011-03-02 08:56:19 +01001785#ifndef _WIN32
1786void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1787{
1788 RAMBlock *block;
1789 ram_addr_t offset;
1790 int flags;
1791 void *area, *vaddr;
1792
Mike Day0dc3f442013-09-05 14:41:35 -04001793 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001794 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001795 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001796 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001797 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001798 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001799 } else if (xen_enabled()) {
1800 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001801 } else {
1802 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001803 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001804 flags |= (block->flags & RAM_SHARED ?
1805 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001806 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1807 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001808 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001809 /*
1810 * Remap needs to match alloc. Accelerators that
1811 * set phys_mem_alloc never remap. If they did,
1812 * we'd need a remap hook here.
1813 */
1814 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1815
Huang Yingcd19cfa2011-03-02 08:56:19 +01001816 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1817 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1818 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001819 }
1820 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001821 fprintf(stderr, "Could not remap addr: "
1822 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001823 length, addr);
1824 exit(1);
1825 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001826 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001827 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001828 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001829 }
1830 }
1831}
1832#endif /* !_WIN32 */
1833
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001834int qemu_get_ram_fd(ram_addr_t addr)
1835{
Mike Dayae3a7042013-09-05 14:41:35 -04001836 RAMBlock *block;
1837 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001838
Mike Day0dc3f442013-09-05 14:41:35 -04001839 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001840 block = qemu_get_ram_block(addr);
1841 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001842 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001843 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001844}
1845
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001846void qemu_set_ram_fd(ram_addr_t addr, int fd)
1847{
1848 RAMBlock *block;
1849
1850 rcu_read_lock();
1851 block = qemu_get_ram_block(addr);
1852 block->fd = fd;
1853 rcu_read_unlock();
1854}
1855
Damjan Marion3fd74b82014-06-26 23:01:32 +02001856void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1857{
Mike Dayae3a7042013-09-05 14:41:35 -04001858 RAMBlock *block;
1859 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001860
Mike Day0dc3f442013-09-05 14:41:35 -04001861 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001862 block = qemu_get_ram_block(addr);
1863 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001864 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001865 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001866}
1867
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001868/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001869 * This should not be used for general purpose DMA. Use address_space_map
1870 * or address_space_rw instead. For local memory (e.g. video ram) that the
1871 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001872 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001873 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001874 */
1875void *qemu_get_ram_ptr(ram_addr_t addr)
1876{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001877 RAMBlock *block = qemu_get_ram_block(addr);
Mike Dayae3a7042013-09-05 14:41:35 -04001878
1879 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001880 /* We need to check if the requested address is in the RAM
1881 * because we don't want to map the entire memory in QEMU.
1882 * In that case just map until the end of the page.
1883 */
1884 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001885 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001886 }
Mike Dayae3a7042013-09-05 14:41:35 -04001887
1888 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001889 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001890 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001891}
1892
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001893/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001894 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001895 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001896 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001897 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001898static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001899{
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001900 RAMBlock *block;
1901 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001902 if (*size == 0) {
1903 return NULL;
1904 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001905
1906 block = qemu_get_ram_block(addr);
1907 offset_inside_block = addr - block->offset;
1908 *size = MIN(*size, block->max_length - offset_inside_block);
1909
1910 if (xen_enabled() && block->host == NULL) {
1911 /* We need to check if the requested address is in the RAM
1912 * because we don't want to map the entire memory in QEMU.
1913 * In that case just map the requested area.
1914 */
1915 if (block->offset == 0) {
1916 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001917 }
1918
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001919 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001920 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001921
1922 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001923}
1924
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001925/*
1926 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1927 * in that RAMBlock.
1928 *
1929 * ptr: Host pointer to look up
1930 * round_offset: If true round the result offset down to a page boundary
1931 * *ram_addr: set to result ram_addr
1932 * *offset: set to result offset within the RAMBlock
1933 *
1934 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001935 *
1936 * By the time this function returns, the returned pointer is not protected
1937 * by RCU anymore. If the caller is not within an RCU critical section and
1938 * does not hold the iothread lock, it must have other means of protecting the
1939 * pointer, such as a reference to the region that includes the incoming
1940 * ram_addr_t.
1941 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001942RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1943 ram_addr_t *ram_addr,
1944 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001945{
pbrook94a6b542009-04-11 17:15:54 +00001946 RAMBlock *block;
1947 uint8_t *host = ptr;
1948
Jan Kiszka868bb332011-06-21 22:59:09 +02001949 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001950 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001951 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001952 block = qemu_get_ram_block(*ram_addr);
1953 if (block) {
1954 *offset = (host - block->host);
1955 }
Mike Day0dc3f442013-09-05 14:41:35 -04001956 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001957 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001958 }
1959
Mike Day0dc3f442013-09-05 14:41:35 -04001960 rcu_read_lock();
1961 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001962 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001963 goto found;
1964 }
1965
Mike Day0dc3f442013-09-05 14:41:35 -04001966 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001967 /* This can happen when the block is not mapped. */
1968 if (block->host == NULL) {
1969 continue;
1970 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001971 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001972 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001973 }
pbrook94a6b542009-04-11 17:15:54 +00001974 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001975
Mike Day0dc3f442013-09-05 14:41:35 -04001976 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001977 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001978
1979found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001980 *offset = (host - block->host);
1981 if (round_offset) {
1982 *offset &= TARGET_PAGE_MASK;
1983 }
1984 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001985 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001986 return block;
1987}
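/* A minimal sketch of using the lookup above (illustration only; the
 * wrapper is hypothetical):
 */
#if 0
static bool example_host_to_ram_addr(void *host_ptr, ram_addr_t *ram_addr)
{
    ram_addr_t offset;
    RAMBlock *rb;

    /* round_offset=false keeps the exact byte offset within the block. */
    rb = qemu_ram_block_from_host(host_ptr, false, ram_addr, &offset);
    return rb != NULL;   /* *ram_addr is valid only on success */
}
#endif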
1988
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001989/*
1990 * Finds the named RAMBlock
1991 *
1992 * name: The name of RAMBlock to find
1993 *
1994 * Returns: RAMBlock (or NULL if not found)
1995 */
1996RAMBlock *qemu_ram_block_by_name(const char *name)
1997{
1998 RAMBlock *block;
1999
2000 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2001 if (!strcmp(name, block->idstr)) {
2002 return block;
2003 }
2004 }
2005
2006 return NULL;
2007}
2008
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002009/* Some of the softmmu routines need to translate from a host pointer
2010 (typically a TLB entry) back to a ram offset. */
2011MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2012{
2013 RAMBlock *block;
2014 ram_addr_t offset; /* Not used */
2015
2016 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2017
2018 if (!block) {
2019 return NULL;
2020 }
2021
2022 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002023}
Alex Williamsonf471a172010-06-11 11:11:42 -06002024
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002025/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002026static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002027 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002028{
Juan Quintela52159192013-10-08 12:44:04 +02002029 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002030 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002031 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002032 switch (size) {
2033 case 1:
2034 stb_p(qemu_get_ram_ptr(ram_addr), val);
2035 break;
2036 case 2:
2037 stw_p(qemu_get_ram_ptr(ram_addr), val);
2038 break;
2039 case 4:
2040 stl_p(qemu_get_ram_ptr(ram_addr), val);
2041 break;
2042 default:
2043 abort();
2044 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002045 /* Set both VGA and migration bits for simplicity and to remove
2046 * the notdirty callback faster.
2047 */
2048 cpu_physical_memory_set_dirty_range(ram_addr, size,
2049 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002050 /* we remove the notdirty callback only if the code has been
2051 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002052 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002053 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002054 }
bellard1ccde1c2004-02-06 19:46:14 +00002055}
2056
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002057static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2058 unsigned size, bool is_write)
2059{
2060 return is_write;
2061}
2062
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002063static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002064 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002065 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002066 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002067};
2068
pbrook0f459d12008-06-09 00:20:13 +00002069/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002070static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002071{
Andreas Färber93afead2013-08-26 03:41:01 +02002072 CPUState *cpu = current_cpu;
2073 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002074 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002075 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002076 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002077 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002078
Andreas Färberff4700b2013-08-26 18:23:18 +02002079 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002080 /* We re-entered the check after replacing the TB. Now raise
2081 * the debug interrupt so that it will trigger after the
2082 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002083 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002084 return;
2085 }
Andreas Färber93afead2013-08-26 03:41:01 +02002086 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002087 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002088 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2089 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002090 if (flags == BP_MEM_READ) {
2091 wp->flags |= BP_WATCHPOINT_HIT_READ;
2092 } else {
2093 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2094 }
2095 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002096 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002097 if (!cpu->watchpoint_hit) {
2098 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002099 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002100 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002101 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002102 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002103 } else {
2104 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002105 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002106 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002107 }
aliguori06d55cc2008-11-18 20:24:06 +00002108 }
aliguori6e140f22008-11-18 20:37:55 +00002109 } else {
2110 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002111 }
2112 }
2113}
2114
pbrook6658ffb2007-03-16 23:58:11 +00002115/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2116 so these check for a hit, then pass through to the normal out-of-line
2117 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002118static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2119 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002120{
Peter Maydell66b9b432015-04-26 16:49:24 +01002121 MemTxResult res;
2122 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002123 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2124 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002125
Peter Maydell66b9b432015-04-26 16:49:24 +01002126 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002127 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002128 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002129 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002130 break;
2131 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002132 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002133 break;
2134 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002135 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002136 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002137 default: abort();
2138 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002139 *pdata = data;
2140 return res;
2141}
2142
2143static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2144 uint64_t val, unsigned size,
2145 MemTxAttrs attrs)
2146{
2147 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002148 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2149 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002150
2151 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2152 switch (size) {
2153 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002154 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002155 break;
2156 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002157 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002158 break;
2159 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002160 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002161 break;
2162 default: abort();
2163 }
2164 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002165}
2166
Avi Kivity1ec9b902012-01-02 12:47:48 +02002167static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002168 .read_with_attrs = watch_mem_read,
2169 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002170 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002171};
pbrook6658ffb2007-03-16 23:58:11 +00002172
Peter Maydellf25a49e2015-04-26 16:49:24 +01002173static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2174 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002175{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002176 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002177 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002178 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002179
blueswir1db7b5422007-05-26 17:36:03 +00002180#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002181 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002182 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002183#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002184 res = address_space_read(subpage->as, addr + subpage->base,
2185 attrs, buf, len);
2186 if (res) {
2187 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002188 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002189 switch (len) {
2190 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002191 *data = ldub_p(buf);
2192 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002193 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002194 *data = lduw_p(buf);
2195 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002196 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002197 *data = ldl_p(buf);
2198 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002199 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002200 *data = ldq_p(buf);
2201 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002202 default:
2203 abort();
2204 }
blueswir1db7b5422007-05-26 17:36:03 +00002205}
2206
Peter Maydellf25a49e2015-04-26 16:49:24 +01002207static MemTxResult subpage_write(void *opaque, hwaddr addr,
2208 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002209{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002210 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002211 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002212
blueswir1db7b5422007-05-26 17:36:03 +00002213#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002214 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002215 " value %"PRIx64"\n",
2216 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002217#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002218 switch (len) {
2219 case 1:
2220 stb_p(buf, value);
2221 break;
2222 case 2:
2223 stw_p(buf, value);
2224 break;
2225 case 4:
2226 stl_p(buf, value);
2227 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002228 case 8:
2229 stq_p(buf, value);
2230 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002231 default:
2232 abort();
2233 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002234 return address_space_write(subpage->as, addr + subpage->base,
2235 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002236}
2237
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002238static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002239 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002240{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002241 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002242#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002243 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002244 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002245#endif
2246
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002247 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002248 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002249}
2250
Avi Kivity70c68e42012-01-02 12:32:48 +02002251static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002252 .read_with_attrs = subpage_read,
2253 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002254 .impl.min_access_size = 1,
2255 .impl.max_access_size = 8,
2256 .valid.min_access_size = 1,
2257 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002258 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002259 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002260};
2261
Anthony Liguoric227f092009-10-01 16:12:16 -05002262static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002263 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002264{
2265 int idx, eidx;
2266
2267 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2268 return -1;
2269 idx = SUBPAGE_IDX(start);
2270 eidx = SUBPAGE_IDX(end);
2271#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002272 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2273 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002274#endif
blueswir1db7b5422007-05-26 17:36:03 +00002275 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002276 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002277 }
2278
2279 return 0;
2280}
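/* Illustration, assuming SUBPAGE_IDX() indexes by byte within a 4 KiB
 * page: registering section 7 for the range [0x100, 0x2ff] sets
 * mmio->sub_section[0x100] through sub_section[0x2ff] to 7, so any access
 * landing in those bytes is dispatched to section 7.
 */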
2281
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002282static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002283{
Anthony Liguoric227f092009-10-01 16:12:16 -05002284 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002285
Anthony Liguori7267c092011-08-20 22:09:37 -05002286 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002287
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002288 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002289 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002290 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002291 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002292 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002293#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002294 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2295 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002296#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002297 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002298
2299 return mmio;
2300}
2301
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002302static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2303 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002304{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002305 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002306 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002307 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002308 .mr = mr,
2309 .offset_within_address_space = 0,
2310 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002311 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002312 };
2313
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002314 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002315}
2316
Peter Maydella54c87b2016-01-21 14:15:05 +00002317MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002318{
Peter Maydella54c87b2016-01-21 14:15:05 +00002319 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2320 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002321 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002322 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002323
2324 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002325}
2326
Avi Kivitye9179ce2009-06-14 11:38:52 +03002327static void io_mem_init(void)
2328{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002329 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002330 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002331 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002332 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002333 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002334 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002335 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002336}
2337
Avi Kivityac1970f2012-10-03 16:22:53 +02002338static void mem_begin(MemoryListener *listener)
2339{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002340 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002341 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2342 uint16_t n;
2343
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002344 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002345 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002346 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002347 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002348 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002349 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002350 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002351 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002352
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002353 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002354 d->as = as;
2355 as->next_dispatch = d;
2356}
2357
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002358static void address_space_dispatch_free(AddressSpaceDispatch *d)
2359{
2360 phys_sections_free(&d->map);
2361 g_free(d);
2362}
2363
Paolo Bonzini00752702013-05-29 12:13:54 +02002364static void mem_commit(MemoryListener *listener)
2365{
2366 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002367 AddressSpaceDispatch *cur = as->dispatch;
2368 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002369
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002370 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002371
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002372 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002373 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002374 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002375 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002376}
2377
Avi Kivity1d711482012-10-02 18:54:45 +02002378static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002379{
Peter Maydell32857f42015-10-01 15:29:50 +01002380 CPUAddressSpace *cpuas;
2381 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002382
2383    /* Since each CPU stores RAM addresses in its TLB cache, we must
2384       reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002385 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2386 cpu_reloading_memory_map();
2387 /* The CPU and TLB are protected by the iothread lock.
2388 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2389 * may have split the RCU critical section.
2390 */
2391 d = atomic_rcu_read(&cpuas->as->dispatch);
2392 cpuas->memory_dispatch = d;
2393 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002394}
2395
Avi Kivityac1970f2012-10-03 16:22:53 +02002396void address_space_init_dispatch(AddressSpace *as)
2397{
Paolo Bonzini00752702013-05-29 12:13:54 +02002398 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002399 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002400 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002401 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002402 .region_add = mem_add,
2403 .region_nop = mem_add,
2404 .priority = 0,
2405 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002406 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002407}
2408
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002409void address_space_unregister(AddressSpace *as)
2410{
2411 memory_listener_unregister(&as->dispatch_listener);
2412}
2413
Avi Kivity83f3c252012-10-07 12:59:55 +02002414void address_space_destroy_dispatch(AddressSpace *as)
2415{
2416 AddressSpaceDispatch *d = as->dispatch;
2417
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002418 atomic_rcu_set(&as->dispatch, NULL);
2419 if (d) {
2420 call_rcu(d, address_space_dispatch_free, rcu);
2421 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002422}
2423
Avi Kivity62152b82011-07-26 14:26:14 +03002424static void memory_map_init(void)
2425{
Anthony Liguori7267c092011-08-20 22:09:37 -05002426 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002427
Paolo Bonzini57271d62013-11-07 17:14:37 +01002428 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002429 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002430
Anthony Liguori7267c092011-08-20 22:09:37 -05002431 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002432 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2433 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002434 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002435}
2436
2437MemoryRegion *get_system_memory(void)
2438{
2439 return system_memory;
2440}
2441
Avi Kivity309cb472011-08-08 16:09:03 +03002442MemoryRegion *get_system_io(void)
2443{
2444 return system_io;
2445}
2446
pbrooke2eef172008-06-08 01:09:01 +00002447#endif /* !defined(CONFIG_USER_ONLY) */
2448
bellard13eb76e2004-01-24 15:23:36 +00002449/* physical memory access (slow version, mainly for debug) */
2450#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002451int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002452 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002453{
2454 int l, flags;
2455 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002456 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002457
2458 while (len > 0) {
2459 page = addr & TARGET_PAGE_MASK;
2460 l = (page + TARGET_PAGE_SIZE) - addr;
2461 if (l > len)
2462 l = len;
2463 flags = page_get_flags(page);
2464 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002465 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002466 if (is_write) {
2467 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002468 return -1;
bellard579a97f2007-11-11 14:26:47 +00002469 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002470 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002471 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002472 memcpy(p, buf, l);
2473 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002474 } else {
2475 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002476 return -1;
bellard579a97f2007-11-11 14:26:47 +00002477 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002478 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002479 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002480 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002481 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002482 }
2483 len -= l;
2484 buf += l;
2485 addr += l;
2486 }
Paul Brooka68fe892010-03-01 00:08:59 +00002487 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002488}
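/*
 * Illustrative sketch (not part of the original file): how a debugger
 * front end such as the gdbstub might use the helper above.  The loop in
 * cpu_memory_rw_debug() splits the request on page boundaries and checks
 * the PAGE_* protection flags for us.  The function name is hypothetical.
 */
#if 0
static bool example_debug_read(CPUState *cpu, target_ulong addr,
                               void *out, int len)
{
    /* is_write == 0: copy from guest memory into the host buffer */
    return cpu_memory_rw_debug(cpu, addr, out, len, 0) == 0;
}
#endif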
bellard8df1cd02005-01-28 22:37:22 +00002489
bellard13eb76e2004-01-24 15:23:36 +00002490#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002491
Paolo Bonzini845b6212015-03-23 11:45:53 +01002492static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002493 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002494{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002495 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2496 /* No early return if dirty_log_mask is or becomes 0, because
2497 * cpu_physical_memory_set_dirty_range will still call
2498 * xen_modified_memory.
2499 */
2500 if (dirty_log_mask) {
2501 dirty_log_mask =
2502 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002503 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002504 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2505 tb_invalidate_phys_range(addr, addr + length);
2506 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2507 }
2508 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002509}
2510
Richard Henderson23326162013-07-08 14:55:59 -07002511static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002512{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002513 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002514
2515 /* Regions are assumed to support 1-4 byte accesses unless
2516 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002517 if (access_size_max == 0) {
2518 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002519 }
Richard Henderson23326162013-07-08 14:55:59 -07002520
2521 /* Bound the maximum access by the alignment of the address. */
2522 if (!mr->ops->impl.unaligned) {
2523 unsigned align_size_max = addr & -addr;
2524 if (align_size_max != 0 && align_size_max < access_size_max) {
2525 access_size_max = align_size_max;
2526 }
2527 }
2528
2529 /* Don't attempt accesses larger than the maximum. */
2530 if (l > access_size_max) {
2531 l = access_size_max;
2532 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002533 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002534
2535 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002536}
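/*
 * Worked example (illustrative, not from the original source): for a
 * region with valid.max_access_size == 8 and impl.unaligned == false,
 * a 6-byte request at addr 0x1002 is first bounded by the address
 * alignment (0x1002 & -0x1002 == 2), so the first piece is a 2-byte
 * access.  On the next iteration the address is 0x1004, the alignment
 * bound rises to 4, and the remaining 4 bytes go out as one access.
 */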
2537
Jan Kiszka4840f102015-06-18 18:47:22 +02002538static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002539{
Jan Kiszka4840f102015-06-18 18:47:22 +02002540 bool unlocked = !qemu_mutex_iothread_locked();
2541 bool release_lock = false;
2542
2543 if (unlocked && mr->global_locking) {
2544 qemu_mutex_lock_iothread();
2545 unlocked = false;
2546 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002547 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002548 if (mr->flush_coalesced_mmio) {
2549 if (unlocked) {
2550 qemu_mutex_lock_iothread();
2551 }
2552 qemu_flush_coalesced_mmio_buffer();
2553 if (unlocked) {
2554 qemu_mutex_unlock_iothread();
2555 }
2556 }
2557
2558 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002559}
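/*
 * Illustrative sketch (not part of the original file) of the calling
 * convention used by the dispatch loops below: take whatever locking
 * prepare_mmio_access() asked for, dispatch, then drop the BQL if it
 * was acquired here.  The function name is hypothetical.
 */
#if 0
static MemTxResult example_dispatch_one(MemoryRegion *mr, hwaddr addr1,
                                        uint64_t val, MemTxAttrs attrs)
{
    bool release_lock = prepare_mmio_access(mr);
    MemTxResult r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);

    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    return r;
}
#endif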
2560
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002561/* Called within RCU critical section. */
2562static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2563 MemTxAttrs attrs,
2564 const uint8_t *buf,
2565 int len, hwaddr addr1,
2566 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002567{
bellard13eb76e2004-01-24 15:23:36 +00002568 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002569 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002570 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002571 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002572
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002573 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002574 if (!memory_access_is_direct(mr, true)) {
2575 release_lock |= prepare_mmio_access(mr);
2576 l = memory_access_size(mr, l, addr1);
2577 /* XXX: could force current_cpu to NULL to avoid
2578 potential bugs */
2579 switch (l) {
2580 case 8:
2581 /* 64 bit write access */
2582 val = ldq_p(buf);
2583 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2584 attrs);
2585 break;
2586 case 4:
2587 /* 32 bit write access */
2588 val = ldl_p(buf);
2589 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2590 attrs);
2591 break;
2592 case 2:
2593 /* 16 bit write access */
2594 val = lduw_p(buf);
2595 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2596 attrs);
2597 break;
2598 case 1:
2599 /* 8 bit write access */
2600 val = ldub_p(buf);
2601 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2602 attrs);
2603 break;
2604 default:
2605 abort();
bellard13eb76e2004-01-24 15:23:36 +00002606 }
2607 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002608 addr1 += memory_region_get_ram_addr(mr);
2609 /* RAM case */
2610 ptr = qemu_get_ram_ptr(addr1);
2611 memcpy(ptr, buf, l);
2612 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002613 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002614
2615 if (release_lock) {
2616 qemu_mutex_unlock_iothread();
2617 release_lock = false;
2618 }
2619
bellard13eb76e2004-01-24 15:23:36 +00002620 len -= l;
2621 buf += l;
2622 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002623
2624 if (!len) {
2625 break;
2626 }
2627
2628 l = len;
2629 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002630 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002631
Peter Maydell3b643492015-04-26 16:49:23 +01002632 return result;
bellard13eb76e2004-01-24 15:23:36 +00002633}
bellard8df1cd02005-01-28 22:37:22 +00002634
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002635MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2636 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002637{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002638 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002639 hwaddr addr1;
2640 MemoryRegion *mr;
2641 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002642
2643 if (len > 0) {
2644 rcu_read_lock();
2645 l = len;
2646 mr = address_space_translate(as, addr, &addr1, &l, true);
2647 result = address_space_write_continue(as, addr, attrs, buf, len,
2648 addr1, l, mr);
2649 rcu_read_unlock();
2650 }
2651
2652 return result;
2653}
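/*
 * Illustrative sketch (not part of the original file): a device model
 * pushing a completed descriptor back to guest memory and checking the
 * transaction result.  All names are hypothetical.
 */
#if 0
static void example_push_descriptor(AddressSpace *as, hwaddr desc_addr,
                                    const uint8_t *desc, int desc_len)
{
    MemTxResult r;

    r = address_space_write(as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                            desc, desc_len);
    if (r != MEMTX_OK) {
        /* typically logged or latched in a device status register */
    }
}
#endif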
2654
2655/* Called within RCU critical section. */
2656MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2657 MemTxAttrs attrs, uint8_t *buf,
2658 int len, hwaddr addr1, hwaddr l,
2659 MemoryRegion *mr)
2660{
2661 uint8_t *ptr;
2662 uint64_t val;
2663 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002664 bool release_lock = false;
2665
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002666 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002667 if (!memory_access_is_direct(mr, false)) {
2668 /* I/O case */
2669 release_lock |= prepare_mmio_access(mr);
2670 l = memory_access_size(mr, l, addr1);
2671 switch (l) {
2672 case 8:
2673 /* 64 bit read access */
2674 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2675 attrs);
2676 stq_p(buf, val);
2677 break;
2678 case 4:
2679 /* 32 bit read access */
2680 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2681 attrs);
2682 stl_p(buf, val);
2683 break;
2684 case 2:
2685 /* 16 bit read access */
2686 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2687 attrs);
2688 stw_p(buf, val);
2689 break;
2690 case 1:
2691 /* 8 bit read access */
2692 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2693 attrs);
2694 stb_p(buf, val);
2695 break;
2696 default:
2697 abort();
2698 }
2699 } else {
2700 /* RAM case */
2701 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2702 memcpy(buf, ptr, l);
2703 }
2704
2705 if (release_lock) {
2706 qemu_mutex_unlock_iothread();
2707 release_lock = false;
2708 }
2709
2710 len -= l;
2711 buf += l;
2712 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002713
2714 if (!len) {
2715 break;
2716 }
2717
2718 l = len;
2719 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002720 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002721
2722 return result;
2723}
2724
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002725MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2726 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002727{
2728 hwaddr l;
2729 hwaddr addr1;
2730 MemoryRegion *mr;
2731 MemTxResult result = MEMTX_OK;
2732
2733 if (len > 0) {
2734 rcu_read_lock();
2735 l = len;
2736 mr = address_space_translate(as, addr, &addr1, &l, false);
2737 result = address_space_read_continue(as, addr, attrs, buf, len,
2738 addr1, l, mr);
2739 rcu_read_unlock();
2740 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002741
2742 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002743}
2744
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002745MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2746 uint8_t *buf, int len, bool is_write)
2747{
2748 if (is_write) {
2749 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2750 } else {
2751 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2752 }
2753}
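/*
 * Illustrative sketch (not part of the original file): the combined
 * entry point is convenient when the direction is only known at run
 * time; is_write == false selects a read.  Names are hypothetical.
 */
#if 0
static MemTxResult example_xfer(AddressSpace *as, hwaddr addr,
                                uint8_t *buf, int len, bool to_guest)
{
    return address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                            buf, len, to_guest);
}
#endif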
Avi Kivityac1970f2012-10-03 16:22:53 +02002754
Avi Kivitya8170e52012-10-23 12:30:10 +02002755void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002756 int len, int is_write)
2757{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002758 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2759 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002760}
2761
Alexander Graf582b55a2013-12-11 14:17:44 +01002762enum write_rom_type {
2763 WRITE_DATA,
2764 FLUSH_CACHE,
2765};
2766
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002767static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002768 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002769{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002770 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002771 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002772 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002773 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002774
Paolo Bonzini41063e12015-03-18 14:21:43 +01002775 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002776 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002777 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002778 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002779
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002780 if (!(memory_region_is_ram(mr) ||
2781 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002782 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002783 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002784 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002785 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002786 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002787 switch (type) {
2788 case WRITE_DATA:
2789 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002790 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002791 break;
2792 case FLUSH_CACHE:
2793 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2794 break;
2795 }
bellardd0ecd2a2006-04-23 17:14:48 +00002796 }
2797 len -= l;
2798 buf += l;
2799 addr += l;
2800 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002801 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002802}
2803
Alexander Graf582b55a2013-12-11 14:17:44 +01002804/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002805void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002806 const uint8_t *buf, int len)
2807{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002808 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002809}
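/*
 * Illustrative sketch (not part of the original file): a firmware
 * loader depositing a blob where a plain address_space_write() would be
 * refused by a ROM region.  The address and names are hypothetical.
 */
#if 0
static void example_install_firmware(const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, blob_len);
}
#endif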
2810
2811void cpu_flush_icache_range(hwaddr start, int len)
2812{
2813 /*
2814 * This function should do the same thing as an icache flush that was
2815 * triggered from within the guest. For TCG we are always cache coherent,
2816 * so there is no need to flush anything. For KVM / Xen we need to flush
2817 * the host's instruction cache at least.
2818 */
2819 if (tcg_enabled()) {
2820 return;
2821 }
2822
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002823 cpu_physical_memory_write_rom_internal(&address_space_memory,
2824 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002825}
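/*
 * Illustrative sketch (not part of the original file): after depositing
 * executable guest code, keep the host instruction cache coherent; as
 * noted above this is a no-op under TCG.  Names are hypothetical.
 */
#if 0
static void example_patch_guest_code(hwaddr code_addr,
                                     const uint8_t *code, int code_len)
{
    cpu_physical_memory_write_rom(&address_space_memory, code_addr,
                                  code, code_len);
    cpu_flush_icache_range(code_addr, code_len);
}
#endif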
2826
aliguori6d16c2f2009-01-22 16:59:11 +00002827typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002828 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002829 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002830 hwaddr addr;
2831 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002832 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002833} BounceBuffer;
2834
2835static BounceBuffer bounce;
2836
aliguoriba223c22009-01-22 16:59:16 +00002837typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002838 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002839 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002840} MapClient;
2841
Fam Zheng38e047b2015-03-16 17:03:35 +08002842QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002843static QLIST_HEAD(map_client_list, MapClient) map_client_list
2844 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002845
Fam Zhenge95205e2015-03-16 17:03:37 +08002846static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002847{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002848 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002849 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002850}
2851
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002852static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002853{
2854 MapClient *client;
2855
Blue Swirl72cf2d42009-09-12 07:36:22 +00002856 while (!QLIST_EMPTY(&map_client_list)) {
2857 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002858 qemu_bh_schedule(client->bh);
2859 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002860 }
2861}
2862
Fam Zhenge95205e2015-03-16 17:03:37 +08002863void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002864{
2865 MapClient *client = g_malloc(sizeof(*client));
2866
Fam Zheng38e047b2015-03-16 17:03:35 +08002867 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002868 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002869 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002870 if (!atomic_read(&bounce.in_use)) {
2871 cpu_notify_map_clients_locked();
2872 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002873 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002874}
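/*
 * Illustrative sketch (not part of the original file) of the retry
 * protocol: when address_space_map() returns NULL because the bounce
 * buffer is busy, register a bottom half.  The notifier both schedules
 * the bh and removes the client from the list, so the callback simply
 * re-issues the transfer.  All names are hypothetical.
 */
#if 0
typedef struct ExampleDMA ExampleDMA;   /* hypothetical device state */

static void example_map_retry_cb(void *opaque)
{
    ExampleDMA *s = opaque;

    /* already unregistered by cpu_notify_map_clients_locked() */
    example_restart_transfer(s);        /* retries address_space_map() */
}

/* at the point where address_space_map() returned NULL: */
cpu_register_map_client(s->bh);  /* s->bh from qemu_bh_new(example_map_retry_cb, s) */
#endif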
2875
Fam Zheng38e047b2015-03-16 17:03:35 +08002876void cpu_exec_init_all(void)
2877{
2878 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002879 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002880 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002881 qemu_mutex_init(&map_client_list_lock);
2882}
2883
Fam Zhenge95205e2015-03-16 17:03:37 +08002884void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002885{
Fam Zhenge95205e2015-03-16 17:03:37 +08002886 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002887
Fam Zhenge95205e2015-03-16 17:03:37 +08002888 qemu_mutex_lock(&map_client_list_lock);
2889 QLIST_FOREACH(client, &map_client_list, link) {
2890 if (client->bh == bh) {
2891 cpu_unregister_map_client_do(client);
2892 break;
2893 }
2894 }
2895 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002896}
2897
2898static void cpu_notify_map_clients(void)
2899{
Fam Zheng38e047b2015-03-16 17:03:35 +08002900 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002901 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002902 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002903}
2904
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002905bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2906{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002907 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002908 hwaddr l, xlat;
2909
Paolo Bonzini41063e12015-03-18 14:21:43 +01002910 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002911 while (len > 0) {
2912 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002913 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2914 if (!memory_access_is_direct(mr, is_write)) {
2915 l = memory_access_size(mr, l, addr);
2916 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002917 return false;
2918 }
2919 }
2920
2921 len -= l;
2922 addr += l;
2923 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002924 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002925 return true;
2926}
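/*
 * Illustrative sketch (not part of the original file): probe a DMA
 * window before starting a transfer so a device can fail cleanly
 * instead of discovering an invalid region mid-copy.  Names are
 * hypothetical.
 */
#if 0
static bool example_window_ok(AddressSpace *as, hwaddr base, int len)
{
    return address_space_access_valid(as, base, len, true /* is_write */);
}
#endif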
2927
aliguori6d16c2f2009-01-22 16:59:11 +00002928/* Map a physical memory region into a host virtual address.
2929 * May map a subset of the requested range, given by and returned in *plen.
2930 * May return NULL if resources needed to perform the mapping are exhausted.
2931 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002932 * Use cpu_register_map_client() to know when retrying the map operation is
2933 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002934 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002935void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002936 hwaddr addr,
2937 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002938 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002939{
Avi Kivitya8170e52012-10-23 12:30:10 +02002940 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002941 hwaddr done = 0;
2942 hwaddr l, xlat, base;
2943 MemoryRegion *mr, *this_mr;
2944 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002945 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002946
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002947 if (len == 0) {
2948 return NULL;
2949 }
aliguori6d16c2f2009-01-22 16:59:11 +00002950
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002951 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002952 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002953 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002954
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002955 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002956 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002957 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002958 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002959 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002960 /* Avoid unbounded allocations */
2961 l = MIN(l, TARGET_PAGE_SIZE);
2962 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002963 bounce.addr = addr;
2964 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002965
2966 memory_region_ref(mr);
2967 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002968 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002969 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2970 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002971 }
aliguori6d16c2f2009-01-22 16:59:11 +00002972
Paolo Bonzini41063e12015-03-18 14:21:43 +01002973 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002974 *plen = l;
2975 return bounce.buffer;
2976 }
2977
2978 base = xlat;
2979 raddr = memory_region_get_ram_addr(mr);
2980
2981 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002982 len -= l;
2983 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002984 done += l;
2985 if (len == 0) {
2986 break;
2987 }
2988
2989 l = len;
2990 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2991 if (this_mr != mr || xlat != base + done) {
2992 break;
2993 }
aliguori6d16c2f2009-01-22 16:59:11 +00002994 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002995
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002996 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002997 *plen = done;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002998 ptr = qemu_ram_ptr_length(raddr + base, plen);
2999 rcu_read_unlock();
3000
3001 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003002}
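/*
 * Illustrative sketch (not part of the original file) of the
 * map -> access -> unmap cycle documented above: *plen may come back
 * shorter than requested, so real callers loop or fall back to
 * address_space_rw().  The function name is hypothetical.
 */
#if 0
static void example_zero_guest_buffer(AddressSpace *as, hwaddr addr,
                                      hwaddr size)
{
    hwaddr plen = size;
    void *p = address_space_map(as, addr, &plen, true /* is_write */);

    if (p) {
        memset(p, 0, plen);                  /* direct host access */
        address_space_unmap(as, p, plen, true, plen);
    }
}
#endif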
3003
Avi Kivityac1970f2012-10-03 16:22:53 +02003004/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003005 * Will also mark the memory as dirty if is_write == 1. access_len gives
3006 * the amount of memory that was actually read or written by the caller.
3007 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003008void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3009 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003010{
3011 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003012 MemoryRegion *mr;
3013 ram_addr_t addr1;
3014
3015 mr = qemu_ram_addr_from_host(buffer, &addr1);
3016 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003017 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003018 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003019 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003020 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003021 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003022 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003023 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003024 return;
3025 }
3026 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003027 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3028 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003029 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003030 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003031 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003032 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003033 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003034 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003035}
bellardd0ecd2a2006-04-23 17:14:48 +00003036
Avi Kivitya8170e52012-10-23 12:30:10 +02003037void *cpu_physical_memory_map(hwaddr addr,
3038 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003039 int is_write)
3040{
3041 return address_space_map(&address_space_memory, addr, plen, is_write);
3042}
3043
Avi Kivitya8170e52012-10-23 12:30:10 +02003044void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3045 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003046{
3047 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3048}
3049
bellard8df1cd02005-01-28 22:37:22 +00003050/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003051static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3052 MemTxAttrs attrs,
3053 MemTxResult *result,
3054 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003055{
bellard8df1cd02005-01-28 22:37:22 +00003056 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003057 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003058 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003059 hwaddr l = 4;
3060 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003061 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003062 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003063
Paolo Bonzini41063e12015-03-18 14:21:43 +01003064 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003065 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003066 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003067 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003068
bellard8df1cd02005-01-28 22:37:22 +00003069 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003070 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003071#if defined(TARGET_WORDS_BIGENDIAN)
3072 if (endian == DEVICE_LITTLE_ENDIAN) {
3073 val = bswap32(val);
3074 }
3075#else
3076 if (endian == DEVICE_BIG_ENDIAN) {
3077 val = bswap32(val);
3078 }
3079#endif
bellard8df1cd02005-01-28 22:37:22 +00003080 } else {
3081 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003082 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003083 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003084 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003085 switch (endian) {
3086 case DEVICE_LITTLE_ENDIAN:
3087 val = ldl_le_p(ptr);
3088 break;
3089 case DEVICE_BIG_ENDIAN:
3090 val = ldl_be_p(ptr);
3091 break;
3092 default:
3093 val = ldl_p(ptr);
3094 break;
3095 }
Peter Maydell50013112015-04-26 16:49:24 +01003096 r = MEMTX_OK;
3097 }
3098 if (result) {
3099 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003100 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003101 if (release_lock) {
3102 qemu_mutex_unlock_iothread();
3103 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003104 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003105 return val;
3106}
3107
Peter Maydell50013112015-04-26 16:49:24 +01003108uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3109 MemTxAttrs attrs, MemTxResult *result)
3110{
3111 return address_space_ldl_internal(as, addr, attrs, result,
3112 DEVICE_NATIVE_ENDIAN);
3113}
3114
3115uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3116 MemTxAttrs attrs, MemTxResult *result)
3117{
3118 return address_space_ldl_internal(as, addr, attrs, result,
3119 DEVICE_LITTLE_ENDIAN);
3120}
3121
3122uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3123 MemTxAttrs attrs, MemTxResult *result)
3124{
3125 return address_space_ldl_internal(as, addr, attrs, result,
3126 DEVICE_BIG_ENDIAN);
3127}
3128
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003129uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003130{
Peter Maydell50013112015-04-26 16:49:24 +01003131 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003132}
3133
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003134uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003135{
Peter Maydell50013112015-04-26 16:49:24 +01003136 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003137}
3138
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003139uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003140{
Peter Maydell50013112015-04-26 16:49:24 +01003141 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003142}
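/*
 * Illustrative sketch (not part of the original file): explicit-endian
 * loads suit a fixed-endian device register block, since the _le/_be
 * helpers byteswap independently of the target's endianness.  The
 * register offset and names are hypothetical.
 */
#if 0
static uint32_t example_read_status(AddressSpace *as, hwaddr regs_base)
{
    MemTxResult r;
    uint32_t status = address_space_ldl_le(as, regs_base + 0x04,
                                           MEMTXATTRS_UNSPECIFIED, &r);

    return r == MEMTX_OK ? status : 0xffffffff;  /* all-ones on error */
}
#endif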
3143
bellard84b7b8e2005-11-28 21:19:04 +00003144/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003145static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3146 MemTxAttrs attrs,
3147 MemTxResult *result,
3148 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003149{
bellard84b7b8e2005-11-28 21:19:04 +00003150 uint8_t *ptr;
3151 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003152 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003153 hwaddr l = 8;
3154 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003155 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003156 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003157
Paolo Bonzini41063e12015-03-18 14:21:43 +01003158 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003159 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003160 false);
3161 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003162 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003163
bellard84b7b8e2005-11-28 21:19:04 +00003164 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003165 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003166#if defined(TARGET_WORDS_BIGENDIAN)
3167 if (endian == DEVICE_LITTLE_ENDIAN) {
3168 val = bswap64(val);
3169 }
3170#else
3171 if (endian == DEVICE_BIG_ENDIAN) {
3172 val = bswap64(val);
3173 }
3174#endif
bellard84b7b8e2005-11-28 21:19:04 +00003175 } else {
3176 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003177 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003178 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003179 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003180 switch (endian) {
3181 case DEVICE_LITTLE_ENDIAN:
3182 val = ldq_le_p(ptr);
3183 break;
3184 case DEVICE_BIG_ENDIAN:
3185 val = ldq_be_p(ptr);
3186 break;
3187 default:
3188 val = ldq_p(ptr);
3189 break;
3190 }
Peter Maydell50013112015-04-26 16:49:24 +01003191 r = MEMTX_OK;
3192 }
3193 if (result) {
3194 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003195 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003196 if (release_lock) {
3197 qemu_mutex_unlock_iothread();
3198 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003199 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003200 return val;
3201}
3202
Peter Maydell50013112015-04-26 16:49:24 +01003203uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3204 MemTxAttrs attrs, MemTxResult *result)
3205{
3206 return address_space_ldq_internal(as, addr, attrs, result,
3207 DEVICE_NATIVE_ENDIAN);
3208}
3209
3210uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3211 MemTxAttrs attrs, MemTxResult *result)
3212{
3213 return address_space_ldq_internal(as, addr, attrs, result,
3214 DEVICE_LITTLE_ENDIAN);
3215}
3216
3217uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3218 MemTxAttrs attrs, MemTxResult *result)
3219{
3220 return address_space_ldq_internal(as, addr, attrs, result,
3221 DEVICE_BIG_ENDIAN);
3222}
3223
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003224uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003225{
Peter Maydell50013112015-04-26 16:49:24 +01003226 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003227}
3228
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003229uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003230{
Peter Maydell50013112015-04-26 16:49:24 +01003231 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003232}
3233
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003234uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003235{
Peter Maydell50013112015-04-26 16:49:24 +01003236 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003237}
3238
bellardaab33092005-10-30 20:48:42 +00003239/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003240uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3241 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003242{
3243 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003244 MemTxResult r;
3245
3246 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3247 if (result) {
3248 *result = r;
3249 }
bellardaab33092005-10-30 20:48:42 +00003250 return val;
3251}
3252
Peter Maydell50013112015-04-26 16:49:24 +01003253uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3254{
3255 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3256}
3257
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003258/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003259static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3260 hwaddr addr,
3261 MemTxAttrs attrs,
3262 MemTxResult *result,
3263 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003264{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003265 uint8_t *ptr;
3266 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003267 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003268 hwaddr l = 2;
3269 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003270 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003271 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003272
Paolo Bonzini41063e12015-03-18 14:21:43 +01003273 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003274 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003275 false);
3276 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003277 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003278
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003279 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003280 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003281#if defined(TARGET_WORDS_BIGENDIAN)
3282 if (endian == DEVICE_LITTLE_ENDIAN) {
3283 val = bswap16(val);
3284 }
3285#else
3286 if (endian == DEVICE_BIG_ENDIAN) {
3287 val = bswap16(val);
3288 }
3289#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003290 } else {
3291 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003292 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003293 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003294 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003295 switch (endian) {
3296 case DEVICE_LITTLE_ENDIAN:
3297 val = lduw_le_p(ptr);
3298 break;
3299 case DEVICE_BIG_ENDIAN:
3300 val = lduw_be_p(ptr);
3301 break;
3302 default:
3303 val = lduw_p(ptr);
3304 break;
3305 }
Peter Maydell50013112015-04-26 16:49:24 +01003306 r = MEMTX_OK;
3307 }
3308 if (result) {
3309 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003310 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003311 if (release_lock) {
3312 qemu_mutex_unlock_iothread();
3313 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003314 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003315 return val;
bellardaab33092005-10-30 20:48:42 +00003316}
3317
Peter Maydell50013112015-04-26 16:49:24 +01003318uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3319 MemTxAttrs attrs, MemTxResult *result)
3320{
3321 return address_space_lduw_internal(as, addr, attrs, result,
3322 DEVICE_NATIVE_ENDIAN);
3323}
3324
3325uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3326 MemTxAttrs attrs, MemTxResult *result)
3327{
3328 return address_space_lduw_internal(as, addr, attrs, result,
3329 DEVICE_LITTLE_ENDIAN);
3330}
3331
3332uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3333 MemTxAttrs attrs, MemTxResult *result)
3334{
3335 return address_space_lduw_internal(as, addr, attrs, result,
3336 DEVICE_BIG_ENDIAN);
3337}
3338
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003339uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003340{
Peter Maydell50013112015-04-26 16:49:24 +01003341 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003342}
3343
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003344uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003345{
Peter Maydell50013112015-04-26 16:49:24 +01003346 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003347}
3348
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003349uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003350{
Peter Maydell50013112015-04-26 16:49:24 +01003351 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003352}
3353
bellard8df1cd02005-01-28 22:37:22 +00003354/* warning: addr must be aligned. The RAM page is not marked as dirty
 3355   and the code inside is not invalidated. It is useful if the dirty
 3356   bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003357void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3358 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003359{
bellard8df1cd02005-01-28 22:37:22 +00003360 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003361 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003362 hwaddr l = 4;
3363 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003364 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003365 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003366 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003367
Paolo Bonzini41063e12015-03-18 14:21:43 +01003368 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003369 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003370 true);
3371 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003372 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003373
Peter Maydell50013112015-04-26 16:49:24 +01003374 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003375 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003376 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003377 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003378 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003379
Paolo Bonzini845b6212015-03-23 11:45:53 +01003380 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3381 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003382 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003383 r = MEMTX_OK;
3384 }
3385 if (result) {
3386 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003387 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003388 if (release_lock) {
3389 qemu_mutex_unlock_iothread();
3390 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003391 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003392}
3393
Peter Maydell50013112015-04-26 16:49:24 +01003394void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3395{
3396 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3397}
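/*
 * Illustrative sketch (not part of the original file) of the PTE use
 * case named in the comment above, modeled on the x86 page-table
 * walker: the accessed bit (PG_ACCESSED_MASK, from the x86 headers) is
 * set in guest memory without dirtying the page for TCG, so the dirty
 * bitmap keeps tracking guest-initiated writes only.  The function name
 * is hypothetical.
 */
#if 0
static void example_set_accessed(CPUState *cs, hwaddr pte_addr,
                                 uint32_t pte)
{
    stl_phys_notdirty(cs->as, pte_addr, pte | PG_ACCESSED_MASK);
}
#endif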
3398
bellard8df1cd02005-01-28 22:37:22 +00003399/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003400static inline void address_space_stl_internal(AddressSpace *as,
3401 hwaddr addr, uint32_t val,
3402 MemTxAttrs attrs,
3403 MemTxResult *result,
3404 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003405{
bellard8df1cd02005-01-28 22:37:22 +00003406 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003407 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003408 hwaddr l = 4;
3409 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003410 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003411 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003412
Paolo Bonzini41063e12015-03-18 14:21:43 +01003413 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003414 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003415 true);
3416 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003417 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003418
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003419#if defined(TARGET_WORDS_BIGENDIAN)
3420 if (endian == DEVICE_LITTLE_ENDIAN) {
3421 val = bswap32(val);
3422 }
3423#else
3424 if (endian == DEVICE_BIG_ENDIAN) {
3425 val = bswap32(val);
3426 }
3427#endif
Peter Maydell50013112015-04-26 16:49:24 +01003428 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003429 } else {
bellard8df1cd02005-01-28 22:37:22 +00003430 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003431 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003432 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003433 switch (endian) {
3434 case DEVICE_LITTLE_ENDIAN:
3435 stl_le_p(ptr, val);
3436 break;
3437 case DEVICE_BIG_ENDIAN:
3438 stl_be_p(ptr, val);
3439 break;
3440 default:
3441 stl_p(ptr, val);
3442 break;
3443 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003444 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003445 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003446 }
Peter Maydell50013112015-04-26 16:49:24 +01003447 if (result) {
3448 *result = r;
3449 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003450 if (release_lock) {
3451 qemu_mutex_unlock_iothread();
3452 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003453 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003454}
3455
3456void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3457 MemTxAttrs attrs, MemTxResult *result)
3458{
3459 address_space_stl_internal(as, addr, val, attrs, result,
3460 DEVICE_NATIVE_ENDIAN);
3461}
3462
3463void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3464 MemTxAttrs attrs, MemTxResult *result)
3465{
3466 address_space_stl_internal(as, addr, val, attrs, result,
3467 DEVICE_LITTLE_ENDIAN);
3468}
3469
3470void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3471 MemTxAttrs attrs, MemTxResult *result)
3472{
3473 address_space_stl_internal(as, addr, val, attrs, result,
3474 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003475}
3476
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003477void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003478{
Peter Maydell50013112015-04-26 16:49:24 +01003479 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003480}
3481
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003482void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003483{
Peter Maydell50013112015-04-26 16:49:24 +01003484 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003485}
3486
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003487void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003488{
Peter Maydell50013112015-04-26 16:49:24 +01003489 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003490}
3491
bellardaab33092005-10-30 20:48:42 +00003492/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003493void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3494 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003495{
3496 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003497 MemTxResult r;
3498
3499 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3500 if (result) {
3501 *result = r;
3502 }
3503}
3504
3505void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3506{
3507 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003508}
3509
/* warning: addr must be 2-byte aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
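
/*
 * Byte-order sketch for the RAM fast path above: stw_le_p/stw_be_p store
 * a fixed byte order regardless of host endianness, while stw_p stores
 * the target's native order.  For example (illustrative helper):
 */
static inline void example_stw_byte_order(void)
{
    uint8_t buf[2];

    stw_le_p(buf, 0x1234);    /* buf[0] == 0x34, buf[1] == 0x12 */
    stw_be_p(buf, 0x1234);    /* buf[0] == 0x12, buf[1] == 0x34 */
}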

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

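/*
 * Usage sketch: a caller that must distinguish failure modes checks the
 * MemTxResult explicitly instead of using the stq_*_phys wrappers.  The
 * helper name is hypothetical.
 */
static inline bool example_stq_checked(AddressSpace *as, hwaddr addr,
                                       uint64_t val)
{
    MemTxResult r;

    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, &r);
    /* on failure, r carries MEMTX_ERROR or MEMTX_DECODE_ERROR bits */
    return r == MEMTX_OK;
}
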
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page is mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
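
/*
 * Caller sketch (the gdbstub follows essentially this pattern): read
 * guest virtual memory into a scratch buffer, failing if any page in
 * the range is unmapped.  The helper name is hypothetical.
 */
static inline int example_debug_read(CPUState *cpu, target_ulong vaddr,
                                     uint8_t *buf, int len)
{
    if (cpu_memory_rw_debug(cpu, vaddr, buf, len, 0) < 0) {
        return -1;    /* some page in the range was not mapped */
    }
    return len;
}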

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
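
/*
 * Sketch: target-independent code (migration, for instance) can derive
 * page counts from byte lengths without including target headers.  The
 * helper name is hypothetical.
 */
static inline uint64_t example_pages_for_bytes(uint64_t bytes)
{
    size_t bits = qemu_target_page_bits();

    return (bytes + (1ULL << bits) - 1) >> bits;    /* round up */
}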

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home, kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
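
/*
 * Sketch of the intended use: a legacy virtio device is guest-native
 * endian, so a caller might decide whether to byte-swap by comparing
 * host and target endianness.  HOST_WORDS_BIGENDIAN comes from the
 * build configuration; the helper name is hypothetical.
 */
static inline bool example_legacy_virtio_needs_bswap(void)
{
#ifdef HOST_WORDS_BIGENDIAN
    return !target_words_bigendian();
#else
    return target_words_bigendian();
#endif
}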

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
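
/*
 * Usage sketch: dump-style code can use this predicate to skip device
 * (MMIO) pages, which cannot safely be read as plain RAM.  The helper
 * name is hypothetical.
 */
static inline bool example_range_is_dumpable(hwaddr start, hwaddr len)
{
    hwaddr a;

    for (a = start; a < start + len; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(a)) {
            return false;
        }
    }
    return true;
}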

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
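
/*
 * Sketch of a RAMBlockIterFunc callback: accumulate the total size of
 * all RAM blocks into the opaque pointer, e.g.
 *   uint64_t total = 0;
 *   qemu_ram_foreach_block(example_count_ram, &total);
 * The callback name is hypothetical; returning non-zero would stop the
 * iteration early.
 */
static int example_count_ram(const char *block_name, void *host_addr,
                             ram_addr_t offset, ram_addr_t length,
                             void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;
}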
#endif