/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

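/* Make sure the node pool has room for at least @nodes more nodes,
 * growing the allocation geometrically when it does not.
 */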
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

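/* Allocate one radix-tree node from the pool.  Every entry of a leaf node
 * starts out pointing at PHYS_SECTION_UNASSIGNED; entries of an inner node
 * start out as PHYS_MAP_NODE_NIL.
 */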
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

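/* Map the @nb pages starting at page index @index to the section numbered
 * @leaf in dispatch @d, allocating intermediate tree nodes as needed.
 */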
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

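/* Walk the radix tree from @lp and return the MemoryRegionSection that
 * covers @addr, or the unassigned section if nothing is mapped there.
 */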
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

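/* Return the CPU whose cpu_index equals @index, or NULL if there is none. */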
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

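/* Reserve the lowest free cpu_index; fails with an error once all
 * MAX_CPUMASK_BITS indexes are in use.
 */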
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
    return;
}
#endif

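/* Undo cpu_exec_init(): unlink the CPU from the global list, release its
 * cpu_index and unregister its vmstate descriptions.
 */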
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

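/* Throw away TLB entries for [start, start + length) on every CPU; used after
 * the dirty bitmap for the range has been cleared.  Both ends of the range
 * must lie within the same RAMBlock.
 */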
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

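/* Append @section to the map's section table and return its index; the
 * table keeps a reference to the section's MemoryRegion.
 */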
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


Paolo Bonzini052e87b2013-05-27 10:08:27 +02001199static void register_multipage(AddressSpaceDispatch *d,
1200 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001201{
Avi Kivitya8170e52012-10-23 12:30:10 +02001202 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001203 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001204 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1205 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001206
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001207 assert(num_pages);
1208 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001209}
1210
Avi Kivityac1970f2012-10-03 16:22:53 +02001211static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001212{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001213 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001214 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001215 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001216 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001217
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001218 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1219 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1220 - now.offset_within_address_space;
1221
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001222 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001223 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001224 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001225 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001226 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001227 while (int128_ne(remain.size, now.size)) {
1228 remain.size = int128_sub(remain.size, now.size);
1229 remain.offset_within_address_space += int128_get64(now.size);
1230 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001231 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001232 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001233 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001234 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001235 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001236 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001237 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001238 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001239 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001240 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001241 }
1242}
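
/*
 * Worked example (added for clarity, assuming a 4 KiB TARGET_PAGE_SIZE):
 * mem_add() splits a section at offset 0x1800 with size 0x3000 into
 *   - a subpage piece   [0x1800, 0x2000) for the unaligned head,
 *   - a multipage piece [0x2000, 0x4000) covering the whole pages, and
 *   - a subpage piece   [0x4000, 0x4800) for the unaligned tail.
 */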
1243
Sheng Yang62a27442010-01-26 19:21:16 +08001244void qemu_flush_coalesced_mmio_buffer(void)
1245{
1246 if (kvm_enabled())
1247 kvm_flush_coalesced_mmio_buffer();
1248}
1249
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001250void qemu_mutex_lock_ramlist(void)
1251{
1252 qemu_mutex_lock(&ram_list.mutex);
1253}
1254
1255void qemu_mutex_unlock_ramlist(void)
1256{
1257 qemu_mutex_unlock(&ram_list.mutex);
1258}
1259
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001260#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001261static void *file_ram_alloc(RAMBlock *block,
1262 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001263 const char *path,
1264 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001265{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001266 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001268 char *sanitized_name;
1269 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001270 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001271 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001272 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001273
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001274 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1275 error_setg(errp,
1276 "host lacks kvm mmu notifiers, -mem-path unsupported");
1277 return NULL;
1278 }
1279
1280 for (;;) {
1281 fd = open(path, O_RDWR);
1282 if (fd >= 0) {
1283 /* @path names an existing file, use it */
1284 break;
1285 }
1286 if (errno == ENOENT) {
1287 /* @path names a file that doesn't exist, create it */
1288 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1289 if (fd >= 0) {
1290 unlink_on_error = true;
1291 break;
1292 }
1293 } else if (errno == EISDIR) {
1294 /* @path names a directory, create a file there */
1295 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1296 sanitized_name = g_strdup(memory_region_name(block->mr));
1297 for (c = sanitized_name; *c != '\0'; c++) {
1298 if (*c == '/') {
1299 *c = '_';
1300 }
1301 }
1302
1303 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1304 sanitized_name);
1305 g_free(sanitized_name);
1306
1307 fd = mkstemp(filename);
1308 if (fd >= 0) {
1309 unlink(filename);
1310 g_free(filename);
1311 break;
1312 }
1313 g_free(filename);
1314 }
1315 if (errno != EEXIST && errno != EINTR) {
1316 error_setg_errno(errp, errno,
1317 "can't open backing store %s for guest RAM",
1318 path);
1319 goto error;
1320 }
1321 /*
1322 * Try again on EINTR and EEXIST. The latter happens when
1323 * something else creates the file between our two open() calls.
1324 */
1325 }
1326
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001327 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001328 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001329
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001330 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001331 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001332 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001333 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001334 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001335 }
1336
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001337 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001338
1339 /*
1340 * ftruncate is not supported by hugetlbfs in older
1341 * hosts, so don't bother bailing out on errors.
1342 * If anything goes wrong with it under other filesystems,
1343 * mmap will fail.
1344 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001345 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001346 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001347 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001348
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001349 area = qemu_ram_mmap(fd, memory, block->mr->align,
1350 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001351 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001352 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001353 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001354 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001355 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001356
1357 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001358 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001359 }
1360
Alex Williamson04b16652010-07-02 11:13:17 -06001361 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001362 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001363
1364error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001365 if (unlink_on_error) {
1366 unlink(path);
1367 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001368 if (fd != -1) {
1369 close(fd);
1370 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001371 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001372}
1373#endif
1374
Mike Day0dc3f442013-09-05 14:41:35 -04001375/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001376static ram_addr_t find_ram_offset(ram_addr_t size)
1377{
Alex Williamson04b16652010-07-02 11:13:17 -06001378 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001379 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001380
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001381 assert(size != 0); /* it would hand out same offset multiple times */
1382
Mike Day0dc3f442013-09-05 14:41:35 -04001383 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001384 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001385 }
Alex Williamson04b16652010-07-02 11:13:17 -06001386
Mike Day0dc3f442013-09-05 14:41:35 -04001387 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001388 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001389
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001390 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001391
Mike Day0dc3f442013-09-05 14:41:35 -04001392 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001393 if (next_block->offset >= end) {
1394 next = MIN(next, next_block->offset);
1395 }
1396 }
1397 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001398 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001399 mingap = next - end;
1400 }
1401 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001402
1403 if (offset == RAM_ADDR_MAX) {
1404 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1405 (uint64_t)size);
1406 abort();
1407 }
1408
Alex Williamson04b16652010-07-02 11:13:17 -06001409 return offset;
1410}
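
/*
 * Worked example (added for clarity): with blocks already occupying
 * [0x00000, 0x10000) and [0x30000, 0x40000), a request for 0x8000 bytes
 * considers the 0x20000-byte gap at 0x10000 and the unbounded gap at
 * 0x40000, and returns 0x10000: the smallest gap that still fits the
 * request is preferred.
 */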
1411
Juan Quintela652d7ec2012-07-20 10:37:54 +02001412ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001413{
Alex Williamsond17b5282010-06-25 11:08:38 -06001414 RAMBlock *block;
1415 ram_addr_t last = 0;
1416
Mike Day0dc3f442013-09-05 14:41:35 -04001417 rcu_read_lock();
1418 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001419 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001420 }
Mike Day0dc3f442013-09-05 14:41:35 -04001421 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001422 return last;
1423}
1424
Jason Baronddb97f12012-08-02 15:44:16 -04001425static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1426{
1427 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001428
1429 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001430 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001431 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1432 if (ret) {
1433 perror("qemu_madvise");
1434 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1435 "but dump_guest_core=off specified\n");
1436 }
1437 }
1438}
1439
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001440const char *qemu_ram_get_idstr(RAMBlock *rb)
1441{
1442 return rb->idstr;
1443}
1444
Mike Dayae3a7042013-09-05 14:41:35 -04001445/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001446void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001447{
Gongleifa53a0e2016-05-10 10:04:59 +08001448 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001449
Avi Kivityc5705a72011-12-20 15:59:12 +02001450 assert(new_block);
1451 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001452
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001453 if (dev) {
1454 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001455 if (id) {
1456 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001457 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001458 }
1459 }
1460 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1461
Gongleiab0a9952016-05-10 10:05:00 +08001462 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001463 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001464 if (block != new_block &&
1465 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001466 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1467 new_block->idstr);
1468 abort();
1469 }
1470 }
Mike Day0dc3f442013-09-05 14:41:35 -04001471 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001472}
1473
Mike Dayae3a7042013-09-05 14:41:35 -04001474/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001475void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001476{
Mike Dayae3a7042013-09-05 14:41:35 -04001477 /* FIXME: arch_init.c assumes that this is not called throughout
1478 * migration. Ignore the problem since hot-unplug during migration
1479 * does not work anyway.
1480 */
Hu Tao20cfe882014-04-02 15:13:26 +08001481 if (block) {
1482 memset(block->idstr, 0, sizeof(block->idstr));
1483 }
1484}
1485
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001486static int memory_try_enable_merging(void *addr, size_t len)
1487{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001488 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001489 /* disabled by the user */
1490 return 0;
1491 }
1492
1493 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1494}
1495
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001496/* Only legal before the guest might have detected the memory size: e.g. on
1497 * incoming migration, or right after reset.
1498 *
1499 * As the memory core doesn't know how memory is accessed, it is up to the
1500 * resize callback to update device state and/or add assertions to detect
1501 * misuse, if necessary.
1502 */
Gongleifa53a0e2016-05-10 10:04:59 +08001503int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001504{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001505 assert(block);
1506
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001507 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001508
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001509 if (block->used_length == newsize) {
1510 return 0;
1511 }
1512
1513 if (!(block->flags & RAM_RESIZEABLE)) {
1514 error_setg_errno(errp, EINVAL,
1515 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1516 " in != 0x" RAM_ADDR_FMT, block->idstr,
1517 newsize, block->used_length);
1518 return -EINVAL;
1519 }
1520
1521 if (block->max_length < newsize) {
1522 error_setg_errno(errp, EINVAL,
1523 "Length too large: %s: 0x" RAM_ADDR_FMT
1524 " > 0x" RAM_ADDR_FMT, block->idstr,
1525 newsize, block->max_length);
1526 return -EINVAL;
1527 }
1528
1529 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1530 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001531 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1532 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001533 memory_region_set_size(block->mr, newsize);
1534 if (block->resized) {
1535 block->resized(block->idstr, newsize, block->host);
1536 }
1537 return 0;
1538}
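
/*
 * Minimal usage sketch (added for clarity, not part of the original file):
 * growing a block that was created with RAM_RESIZEABLE, e.g. while an
 * incoming migration stream announces a larger used_length.  The function
 * name and size below are hypothetical.
 */
#if 0
static void example_grow_block(RAMBlock *rb)
{
    Error *err = NULL;

    /* newsize is rounded up to the host page size and must not exceed the
     * block's max_length; the block must have been created resizeable. */
    if (qemu_ram_resize(rb, 64 * 1024 * 1024, &err) < 0) {
        error_report_err(err);
    }
}
#endif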
1539
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001540/* Called with ram_list.mutex held */
1541static void dirty_memory_extend(ram_addr_t old_ram_size,
1542 ram_addr_t new_ram_size)
1543{
1544 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1545 DIRTY_MEMORY_BLOCK_SIZE);
1546 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1547 DIRTY_MEMORY_BLOCK_SIZE);
1548 int i;
1549
1550 /* Only need to extend if block count increased */
1551 if (new_num_blocks <= old_num_blocks) {
1552 return;
1553 }
1554
1555 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1556 DirtyMemoryBlocks *old_blocks;
1557 DirtyMemoryBlocks *new_blocks;
1558 int j;
1559
1560 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1561 new_blocks = g_malloc(sizeof(*new_blocks) +
1562 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1563
1564 if (old_num_blocks) {
1565 memcpy(new_blocks->blocks, old_blocks->blocks,
1566 old_num_blocks * sizeof(old_blocks->blocks[0]));
1567 }
1568
1569 for (j = old_num_blocks; j < new_num_blocks; j++) {
1570 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1571 }
1572
1573 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1574
1575 if (old_blocks) {
1576 g_free_rcu(old_blocks, rcu);
1577 }
1578 }
1579}
1580
Fam Zheng528f46a2016-03-01 14:18:18 +08001581static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001582{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001583 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001584 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001585 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001586 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001587
1588 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001589
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001590 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001591 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001592
1593 if (!new_block->host) {
1594 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001595 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001596 new_block->mr, &err);
1597 if (err) {
1598 error_propagate(errp, err);
1599 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001600 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001601 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001602 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001603 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001604 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001605 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001606 error_setg_errno(errp, errno,
1607 "cannot set up guest memory '%s'",
1608 memory_region_name(new_block->mr));
1609 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001610 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001611 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001612 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001613 }
1614 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001615
Li Zhijiandd631692015-07-02 20:18:06 +08001616 new_ram_size = MAX(old_ram_size,
1617 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1618 if (new_ram_size > old_ram_size) {
1619 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001620 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001621 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001622 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1623 * QLIST (which has an RCU-friendly variant) does not have insertion at
1624 * tail, so save the last element in last_block.
1625 */
Mike Day0dc3f442013-09-05 14:41:35 -04001626 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001627 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001628 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001629 break;
1630 }
1631 }
1632 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001633 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001634 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001635 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001636 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001637 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001638 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001639 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001640
Mike Day0dc3f442013-09-05 14:41:35 -04001641 /* Write list before version */
1642 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001643 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001644 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001645
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001646 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001647 new_block->used_length,
1648 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001649
Paolo Bonzinia904c912015-01-21 16:18:35 +01001650 if (new_block->host) {
1651 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1652 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1653 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1654 if (kvm_enabled()) {
1655 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1656 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001657 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001658}
1659
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001660#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001661RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1662 bool share, const char *mem_path,
1663 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001664{
1665 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001666 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001667
1668 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001669 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001670 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001671 }
1672
1673 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1674 /*
1675 * file_ram_alloc() needs to allocate just like
1676 * phys_mem_alloc, but we haven't bothered to provide
1677 * a hook there.
1678 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001679 error_setg(errp,
1680 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001681 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001682 }
1683
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001684 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001685 new_block = g_malloc0(sizeof(*new_block));
1686 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001687 new_block->used_length = size;
1688 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001689 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001690 new_block->host = file_ram_alloc(new_block, size,
1691 mem_path, errp);
1692 if (!new_block->host) {
1693 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001694 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001695 }
1696
Fam Zheng528f46a2016-03-01 14:18:18 +08001697 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001698 if (local_err) {
1699 g_free(new_block);
1700 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001701 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001702 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001703 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001704}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001705#endif
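
/*
 * Usage sketch (added for clarity, not part of the original file): backing
 * guest RAM with a file, as done for -mem-path/hugetlbfs.  The path, size
 * and helper name are hypothetical; in QEMU this is normally reached via
 * memory_region_init_ram_from_file().
 */
#if 0
static RAMBlock *example_file_backed_ram(MemoryRegion *mr, Error **errp)
{
    /* 512 MiB, private mapping, backed by a file under /dev/hugepages */
    return qemu_ram_alloc_from_file(512 * 1024 * 1024, mr,
                                    false, "/dev/hugepages", errp);
}
#endif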
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001706
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001707static
Fam Zheng528f46a2016-03-01 14:18:18 +08001708RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1709 void (*resized)(const char*,
1710 uint64_t length,
1711 void *host),
1712 void *host, bool resizeable,
1713 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001714{
1715 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001716 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001717
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001718 size = HOST_PAGE_ALIGN(size);
1719 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001720 new_block = g_malloc0(sizeof(*new_block));
1721 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001722 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001723 new_block->used_length = size;
1724 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001725 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001726 new_block->fd = -1;
1727 new_block->host = host;
1728 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001729 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001730 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001731 if (resizeable) {
1732 new_block->flags |= RAM_RESIZEABLE;
1733 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001734 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001735 if (local_err) {
1736 g_free(new_block);
1737 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001738 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001739 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001740 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001741}
1742
Fam Zheng528f46a2016-03-01 14:18:18 +08001743RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001744 MemoryRegion *mr, Error **errp)
1745{
1746 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1747}
1748
Fam Zheng528f46a2016-03-01 14:18:18 +08001749RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001750{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001751 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1752}
1753
Fam Zheng528f46a2016-03-01 14:18:18 +08001754RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001755 void (*resized)(const char*,
1756 uint64_t length,
1757 void *host),
1758 MemoryRegion *mr, Error **errp)
1759{
1760 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001761}
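
/*
 * Usage sketch (added for clarity, not part of the original file): allocating
 * a growable block with qemu_ram_alloc_resizeable().  The resized callback is
 * invoked from qemu_ram_resize() once used_length has changed.  All example_*
 * names and sizes are hypothetical.
 */
#if 0
static void example_resized(const char *id, uint64_t length, void *host)
{
    /* A device would refresh any cached size or pointers here. */
}

static RAMBlock *example_alloc_growable(MemoryRegion *mr, Error **errp)
{
    /* starts with 16 MiB used, may later grow up to 64 MiB */
    return qemu_ram_alloc_resizeable(16 * 1024 * 1024, 64 * 1024 * 1024,
                                     example_resized, mr, errp);
}
#endif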
bellarde9a1ab12007-02-08 23:08:38 +00001762
Paolo Bonzini43771532013-09-09 17:58:40 +02001763static void reclaim_ramblock(RAMBlock *block)
1764{
1765 if (block->flags & RAM_PREALLOC) {
1766 ;
1767 } else if (xen_enabled()) {
1768 xen_invalidate_map_cache_entry(block->host);
1769#ifndef _WIN32
1770 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001771 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001772 close(block->fd);
1773#endif
1774 } else {
1775 qemu_anon_ram_free(block->host, block->max_length);
1776 }
1777 g_free(block);
1778}
1779
Fam Zhengf1060c52016-03-01 14:18:22 +08001780void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001781{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001782 if (!block) {
1783 return;
1784 }
1785
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001786 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001787 QLIST_REMOVE_RCU(block, next);
1788 ram_list.mru_block = NULL;
1789 /* Write list before version */
1790 smp_wmb();
1791 ram_list.version++;
1792 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001793 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001794}
1795
Huang Yingcd19cfa2011-03-02 08:56:19 +01001796#ifndef _WIN32
1797void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1798{
1799 RAMBlock *block;
1800 ram_addr_t offset;
1801 int flags;
1802 void *area, *vaddr;
1803
Mike Day0dc3f442013-09-05 14:41:35 -04001804 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001805 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001806 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001807 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001808 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001809 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001810 } else if (xen_enabled()) {
1811 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001812 } else {
1813 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001814 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001815 flags |= (block->flags & RAM_SHARED ?
1816 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001817 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1818 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001819 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001820 /*
1821 * Remap needs to match alloc. Accelerators that
1822 * set phys_mem_alloc never remap. If they did,
1823 * we'd need a remap hook here.
1824 */
1825 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1826
Huang Yingcd19cfa2011-03-02 08:56:19 +01001827 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1828 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1829 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001830 }
1831 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001832 fprintf(stderr, "Could not remap addr: "
1833 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001834 length, addr);
1835 exit(1);
1836 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001837 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001838 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001839 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001840 }
1841 }
1842}
1843#endif /* !_WIN32 */
1844
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001845/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001846 * This should not be used for general purpose DMA. Use address_space_map
1847 * or address_space_rw instead. For local memory (e.g. video ram) that the
1848 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001849 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001850 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001851 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001852void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001853{
Gonglei3655cb92016-02-20 10:35:20 +08001854 RAMBlock *block = ram_block;
1855
1856 if (block == NULL) {
1857 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001858 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001859 }
Mike Dayae3a7042013-09-05 14:41:35 -04001860
1861 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001862 /* We need to check if the requested address is in the RAM
1863 * because we don't want to map the entire memory in QEMU.
1864 * In that case just map until the end of the page.
1865 */
1866 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001867 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001868 }
Mike Dayae3a7042013-09-05 14:41:35 -04001869
1870 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001871 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001872 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001873}
1874
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001875/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001876 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001877 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001878 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001879 */
Gonglei3655cb92016-02-20 10:35:20 +08001880static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1881 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001882{
Gonglei3655cb92016-02-20 10:35:20 +08001883 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001884 if (*size == 0) {
1885 return NULL;
1886 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001887
Gonglei3655cb92016-02-20 10:35:20 +08001888 if (block == NULL) {
1889 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001890 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001891 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001892 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001893
1894 if (xen_enabled() && block->host == NULL) {
1895 /* We need to check if the requested address is in the RAM
1896 * because we don't want to map the entire memory in QEMU.
1897 * In that case just map the requested area.
1898 */
1899 if (block->offset == 0) {
1900 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001901 }
1902
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001903 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001904 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001905
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001906 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001907}
1908
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001909/*
1910 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1911 * in that RAMBlock.
1912 *
1913 * ptr: Host pointer to look up
1914 * round_offset: If true round the result offset down to a page boundary
1915 * *offset: set to result offset within the RAMBlock
1916 * (the ram_addr, if needed, is block->offset + *offset)
1917 *
1918 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001919 *
1920 * By the time this function returns, the returned pointer is not protected
1921 * by RCU anymore. If the caller is not within an RCU critical section and
1922 * does not hold the iothread lock, it must have other means of protecting the
1923 * pointer, such as a reference to the region that includes the incoming
1924 * ram_addr_t.
1925 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001926RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001927 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001928{
pbrook94a6b542009-04-11 17:15:54 +00001929 RAMBlock *block;
1930 uint8_t *host = ptr;
1931
Jan Kiszka868bb332011-06-21 22:59:09 +02001932 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001933 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001934 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001935 ram_addr = xen_ram_addr_from_mapcache(ptr);
1936 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001937 if (block) {
1938 *offset = (host - block->host);
1939 }
Mike Day0dc3f442013-09-05 14:41:35 -04001940 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001941 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001942 }
1943
Mike Day0dc3f442013-09-05 14:41:35 -04001944 rcu_read_lock();
1945 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001946 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001947 goto found;
1948 }
1949
Mike Day0dc3f442013-09-05 14:41:35 -04001950 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001951        /* This case happens when the block is not mapped. */
1952 if (block->host == NULL) {
1953 continue;
1954 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001955 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001956 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001957 }
pbrook94a6b542009-04-11 17:15:54 +00001958 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001959
Mike Day0dc3f442013-09-05 14:41:35 -04001960 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001961 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001962
1963found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001964 *offset = (host - block->host);
1965 if (round_offset) {
1966 *offset &= TARGET_PAGE_MASK;
1967 }
Mike Day0dc3f442013-09-05 14:41:35 -04001968 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001969 return block;
1970}
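
/*
 * Usage sketch (added for clarity, not part of the original file): mapping a
 * host pointer back to its RAMBlock and offset, e.g. from a fault handler.
 * The example_* names are hypothetical.
 */
#if 0
static void example_lookup(void *host_ptr)
{
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host_ptr, false, &offset);

    if (rb) {
        /* rb->idstr names the block; offset is relative to rb->host */
    }
}
#endif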
1971
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001972/*
1973 * Finds the named RAMBlock
1974 *
1975 * name: The name of RAMBlock to find
1976 *
1977 * Returns: RAMBlock (or NULL if not found)
1978 */
1979RAMBlock *qemu_ram_block_by_name(const char *name)
1980{
1981 RAMBlock *block;
1982
1983 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1984 if (!strcmp(name, block->idstr)) {
1985 return block;
1986 }
1987 }
1988
1989 return NULL;
1990}
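
/*
 * Usage sketch (added for clarity, not part of the original file): checking
 * whether a block registered earlier via qemu_ram_set_idstr() exists.  The
 * helper name is hypothetical.
 */
#if 0
static bool example_block_exists(const char *name)
{
    return qemu_ram_block_by_name(name) != NULL;
}
#endif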
1991
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001992/* Some of the softmmu routines need to translate from a host pointer
1993 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001994ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001995{
1996 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001997 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001998
Paolo Bonzinif615f392016-05-26 10:07:50 +02001999 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002000 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002001 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002002 }
2003
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002004 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002005}
Alex Williamsonf471a172010-06-11 11:11:42 -06002006
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002007/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002008static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002009 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002010{
Juan Quintela52159192013-10-08 12:44:04 +02002011 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002012 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002013 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002014 switch (size) {
2015 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002016 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002017 break;
2018 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002019 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002020 break;
2021 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002022 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002023 break;
2024 default:
2025 abort();
2026 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002027 /* Set both VGA and migration bits for simplicity and to remove
2028 * the notdirty callback faster.
2029 */
2030 cpu_physical_memory_set_dirty_range(ram_addr, size,
2031 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002032 /* we remove the notdirty callback only if the code has been
2033 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002034 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002035 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002036 }
bellard1ccde1c2004-02-06 19:46:14 +00002037}
2038
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002039static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2040 unsigned size, bool is_write)
2041{
2042 return is_write;
2043}
2044
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002045static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002046 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002047 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002048 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002049};
2050
pbrook0f459d12008-06-09 00:20:13 +00002051/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002052static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002053{
Andreas Färber93afead2013-08-26 03:41:01 +02002054 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002055 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002056 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002057 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002058 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002059 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002060 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002061
Andreas Färberff4700b2013-08-26 18:23:18 +02002062 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002063 /* We re-entered the check after replacing the TB. Now raise
2064 * the debug interrupt so that it will trigger after the
2065 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002066 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002067 return;
2068 }
Andreas Färber93afead2013-08-26 03:41:01 +02002069 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002070 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002071 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2072 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002073 if (flags == BP_MEM_READ) {
2074 wp->flags |= BP_WATCHPOINT_HIT_READ;
2075 } else {
2076 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2077 }
2078 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002079 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002080 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002081 if (wp->flags & BP_CPU &&
2082 !cc->debug_check_watchpoint(cpu, wp)) {
2083 wp->flags &= ~BP_WATCHPOINT_HIT;
2084 continue;
2085 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002086 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002087 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002088 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002089 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002090 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002091 } else {
2092 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002093 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002094 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002095 }
aliguori06d55cc2008-11-18 20:24:06 +00002096 }
aliguori6e140f22008-11-18 20:37:55 +00002097 } else {
2098 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002099 }
2100 }
2101}
2102
pbrook6658ffb2007-03-16 23:58:11 +00002103/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2104 so these check for a hit then pass through to the normal out-of-line
2105 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002106static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2107 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002108{
Peter Maydell66b9b432015-04-26 16:49:24 +01002109 MemTxResult res;
2110 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002111 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2112 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002113
Peter Maydell66b9b432015-04-26 16:49:24 +01002114 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002115 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002116 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002117 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002118 break;
2119 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002120 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002121 break;
2122 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002123 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002124 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002125 default: abort();
2126 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002127 *pdata = data;
2128 return res;
2129}
2130
2131static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2132 uint64_t val, unsigned size,
2133 MemTxAttrs attrs)
2134{
2135 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002136 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2137 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002138
2139 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2140 switch (size) {
2141 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002142 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002143 break;
2144 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002145 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002146 break;
2147 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002148 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002149 break;
2150 default: abort();
2151 }
2152 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002153}
2154
Avi Kivity1ec9b902012-01-02 12:47:48 +02002155static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002156 .read_with_attrs = watch_mem_read,
2157 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002158 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002159};
pbrook6658ffb2007-03-16 23:58:11 +00002160
Peter Maydellf25a49e2015-04-26 16:49:24 +01002161static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2162 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002163{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002164 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002165 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002166 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002167
blueswir1db7b5422007-05-26 17:36:03 +00002168#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002169 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002170 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002171#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002172 res = address_space_read(subpage->as, addr + subpage->base,
2173 attrs, buf, len);
2174 if (res) {
2175 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002176 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002177 switch (len) {
2178 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002179 *data = ldub_p(buf);
2180 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002181 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002182 *data = lduw_p(buf);
2183 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002184 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002185 *data = ldl_p(buf);
2186 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002187 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002188 *data = ldq_p(buf);
2189 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002190 default:
2191 abort();
2192 }
blueswir1db7b5422007-05-26 17:36:03 +00002193}
2194
Peter Maydellf25a49e2015-04-26 16:49:24 +01002195static MemTxResult subpage_write(void *opaque, hwaddr addr,
2196 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002197{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002198 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002199 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002200
blueswir1db7b5422007-05-26 17:36:03 +00002201#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002202 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002203 " value %"PRIx64"\n",
2204 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002205#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002206 switch (len) {
2207 case 1:
2208 stb_p(buf, value);
2209 break;
2210 case 2:
2211 stw_p(buf, value);
2212 break;
2213 case 4:
2214 stl_p(buf, value);
2215 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002216 case 8:
2217 stq_p(buf, value);
2218 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002219 default:
2220 abort();
2221 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002222 return address_space_write(subpage->as, addr + subpage->base,
2223 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002224}
2225
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002226static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002227 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002228{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002229 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002230#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002231 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002232 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002233#endif
2234
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002235 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002236 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002237}
2238
Avi Kivity70c68e42012-01-02 12:32:48 +02002239static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002240 .read_with_attrs = subpage_read,
2241 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002242 .impl.min_access_size = 1,
2243 .impl.max_access_size = 8,
2244 .valid.min_access_size = 1,
2245 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002246 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002247 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002248};
2249
Anthony Liguoric227f092009-10-01 16:12:16 -05002250static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002251 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002252{
2253 int idx, eidx;
2254
2255 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2256 return -1;
2257 idx = SUBPAGE_IDX(start);
2258 eidx = SUBPAGE_IDX(end);
2259#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002260 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2261 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002262#endif
blueswir1db7b5422007-05-26 17:36:03 +00002263 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002264 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002265 }
2266
2267 return 0;
2268}
2269
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002270static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002271{
Anthony Liguoric227f092009-10-01 16:12:16 -05002272 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002273
Anthony Liguori7267c092011-08-20 22:09:37 -05002274 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002275
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002276 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002277 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002278 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002279 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002280 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002281#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002282 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2283 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002284#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002285 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002286
2287 return mmio;
2288}
2289
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002290static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2291 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002292{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002293 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002294 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002295 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002296 .mr = mr,
2297 .offset_within_address_space = 0,
2298 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002299 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002300 };
2301
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002302 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002303}
2304
Peter Maydella54c87b2016-01-21 14:15:05 +00002305MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002306{
Peter Maydella54c87b2016-01-21 14:15:05 +00002307 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2308 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002309 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002310 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002311
2312 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002313}
2314
Avi Kivitye9179ce2009-06-14 11:38:52 +03002315static void io_mem_init(void)
2316{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002317 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002318 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002319 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002320 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002321 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002322 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002323 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002324}
2325
Avi Kivityac1970f2012-10-03 16:22:53 +02002326static void mem_begin(MemoryListener *listener)
2327{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002328 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002329 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2330 uint16_t n;
2331
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002332 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002333 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002334 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002335 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002336 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002337 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002338 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002339 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002340
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002341 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002342 d->as = as;
2343 as->next_dispatch = d;
2344}
2345
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002346static void address_space_dispatch_free(AddressSpaceDispatch *d)
2347{
2348 phys_sections_free(&d->map);
2349 g_free(d);
2350}
2351
Paolo Bonzini00752702013-05-29 12:13:54 +02002352static void mem_commit(MemoryListener *listener)
2353{
2354 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002355 AddressSpaceDispatch *cur = as->dispatch;
2356 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002357
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002358 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002359
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002360 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002361 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002362 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002363 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002364}
2365
Avi Kivity1d711482012-10-02 18:54:45 +02002366static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002367{
Peter Maydell32857f42015-10-01 15:29:50 +01002368 CPUAddressSpace *cpuas;
2369 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002370
 2371    /* Since each CPU stores RAM addresses in its TLB cache, we must
 2372       reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002373 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2374 cpu_reloading_memory_map();
2375 /* The CPU and TLB are protected by the iothread lock.
2376 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2377 * may have split the RCU critical section.
2378 */
2379 d = atomic_rcu_read(&cpuas->as->dispatch);
2380 cpuas->memory_dispatch = d;
2381 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002382}
2383
Avi Kivityac1970f2012-10-03 16:22:53 +02002384void address_space_init_dispatch(AddressSpace *as)
2385{
Paolo Bonzini00752702013-05-29 12:13:54 +02002386 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002387 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002388 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002389 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002390 .region_add = mem_add,
2391 .region_nop = mem_add,
2392 .priority = 0,
2393 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002394 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002395}
2396
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002397void address_space_unregister(AddressSpace *as)
2398{
2399 memory_listener_unregister(&as->dispatch_listener);
2400}
2401
Avi Kivity83f3c252012-10-07 12:59:55 +02002402void address_space_destroy_dispatch(AddressSpace *as)
2403{
2404 AddressSpaceDispatch *d = as->dispatch;
2405
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002406 atomic_rcu_set(&as->dispatch, NULL);
2407 if (d) {
2408 call_rcu(d, address_space_dispatch_free, rcu);
2409 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002410}
2411
Avi Kivity62152b82011-07-26 14:26:14 +03002412static void memory_map_init(void)
2413{
Anthony Liguori7267c092011-08-20 22:09:37 -05002414 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002415
Paolo Bonzini57271d62013-11-07 17:14:37 +01002416 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002417 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002418
Anthony Liguori7267c092011-08-20 22:09:37 -05002419 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002420 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2421 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002422 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002423}
2424
2425MemoryRegion *get_system_memory(void)
2426{
2427 return system_memory;
2428}
2429
Avi Kivity309cb472011-08-08 16:09:03 +03002430MemoryRegion *get_system_io(void)
2431{
2432 return system_io;
2433}
2434
pbrooke2eef172008-06-08 01:09:01 +00002435#endif /* !defined(CONFIG_USER_ONLY) */
2436
bellard13eb76e2004-01-24 15:23:36 +00002437/* physical memory access (slow version, mainly for debug) */
2438#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002439int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002440 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002441{
2442 int l, flags;
2443 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002444 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002445
2446 while (len > 0) {
2447 page = addr & TARGET_PAGE_MASK;
2448 l = (page + TARGET_PAGE_SIZE) - addr;
2449 if (l > len)
2450 l = len;
2451 flags = page_get_flags(page);
2452 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002453 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002454 if (is_write) {
2455 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002456 return -1;
bellard579a97f2007-11-11 14:26:47 +00002457 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002458 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002459 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002460 memcpy(p, buf, l);
2461 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002462 } else {
2463 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002464 return -1;
bellard579a97f2007-11-11 14:26:47 +00002465 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002466 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002467 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002468 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002469 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002470 }
2471 len -= l;
2472 buf += l;
2473 addr += l;
2474 }
Paul Brooka68fe892010-03-01 00:08:59 +00002475 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002476}
bellard8df1cd02005-01-28 22:37:22 +00002477
bellard13eb76e2004-01-24 15:23:36 +00002478#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002479
Paolo Bonzini845b6212015-03-23 11:45:53 +01002480static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002481 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002482{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002483 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002484 addr += memory_region_get_ram_addr(mr);
2485
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002486 /* No early return if dirty_log_mask is or becomes 0, because
2487 * cpu_physical_memory_set_dirty_range will still call
2488 * xen_modified_memory.
2489 */
2490 if (dirty_log_mask) {
2491 dirty_log_mask =
2492 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002493 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002494 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2495 tb_invalidate_phys_range(addr, addr + length);
2496 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2497 }
2498 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002499}
2500
Richard Henderson23326162013-07-08 14:55:59 -07002501static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002502{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002503 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002504
2505 /* Regions are assumed to support 1-4 byte accesses unless
2506 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002507 if (access_size_max == 0) {
2508 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002509 }
Richard Henderson23326162013-07-08 14:55:59 -07002510
2511 /* Bound the maximum access by the alignment of the address. */
2512 if (!mr->ops->impl.unaligned) {
2513 unsigned align_size_max = addr & -addr;
2514 if (align_size_max != 0 && align_size_max < access_size_max) {
2515 access_size_max = align_size_max;
2516 }
2517 }
2518
2519 /* Don't attempt accesses larger than the maximum. */
2520 if (l > access_size_max) {
2521 l = access_size_max;
2522 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002523 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002524
2525 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002526}
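/*
 * Illustrative worked example (editor's sketch, not part of the original
 * file): how memory_access_size() clamps a request.  Assume a hypothetical
 * MMIO region whose ops declare valid.max_access_size = 4 and do not set
 * impl.unaligned, and a request of l = 8 bytes at addr = 0x1006:
 *
 *     access_size_max = 4;                  // from valid.max_access_size
 *     align_size_max  = 0x1006 & -0x1006;   // == 2, lowest set bit of addr
 *     access_size_max = 2;                  // alignment bound is smaller
 *     l = MIN(8, 2);                        // -> 2
 *     l = pow2floor(2);                     // -> 2
 *
 * so the calling loop splits the 8-byte request into 2-byte pieces.
 */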
2527
Jan Kiszka4840f102015-06-18 18:47:22 +02002528static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002529{
Jan Kiszka4840f102015-06-18 18:47:22 +02002530 bool unlocked = !qemu_mutex_iothread_locked();
2531 bool release_lock = false;
2532
2533 if (unlocked && mr->global_locking) {
2534 qemu_mutex_lock_iothread();
2535 unlocked = false;
2536 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002537 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002538 if (mr->flush_coalesced_mmio) {
2539 if (unlocked) {
2540 qemu_mutex_lock_iothread();
2541 }
2542 qemu_flush_coalesced_mmio_buffer();
2543 if (unlocked) {
2544 qemu_mutex_unlock_iothread();
2545 }
2546 }
2547
2548 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002549}
2550
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002551/* Called within RCU critical section. */
2552static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2553 MemTxAttrs attrs,
2554 const uint8_t *buf,
2555 int len, hwaddr addr1,
2556 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002557{
bellard13eb76e2004-01-24 15:23:36 +00002558 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002559 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002560 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002561 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002562
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002563 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002564 if (!memory_access_is_direct(mr, true)) {
2565 release_lock |= prepare_mmio_access(mr);
2566 l = memory_access_size(mr, l, addr1);
2567 /* XXX: could force current_cpu to NULL to avoid
2568 potential bugs */
2569 switch (l) {
2570 case 8:
2571 /* 64 bit write access */
2572 val = ldq_p(buf);
2573 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2574 attrs);
2575 break;
2576 case 4:
2577 /* 32 bit write access */
2578 val = ldl_p(buf);
2579 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2580 attrs);
2581 break;
2582 case 2:
2583 /* 16 bit write access */
2584 val = lduw_p(buf);
2585 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2586 attrs);
2587 break;
2588 case 1:
2589 /* 8 bit write access */
2590 val = ldub_p(buf);
2591 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2592 attrs);
2593 break;
2594 default:
2595 abort();
bellard13eb76e2004-01-24 15:23:36 +00002596 }
2597 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002598 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002599 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002600 memcpy(ptr, buf, l);
2601 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002602 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002603
2604 if (release_lock) {
2605 qemu_mutex_unlock_iothread();
2606 release_lock = false;
2607 }
2608
bellard13eb76e2004-01-24 15:23:36 +00002609 len -= l;
2610 buf += l;
2611 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002612
2613 if (!len) {
2614 break;
2615 }
2616
2617 l = len;
2618 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002619 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002620
Peter Maydell3b643492015-04-26 16:49:23 +01002621 return result;
bellard13eb76e2004-01-24 15:23:36 +00002622}
bellard8df1cd02005-01-28 22:37:22 +00002623
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002624MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2625 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002626{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002627 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002628 hwaddr addr1;
2629 MemoryRegion *mr;
2630 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002631
2632 if (len > 0) {
2633 rcu_read_lock();
2634 l = len;
2635 mr = address_space_translate(as, addr, &addr1, &l, true);
2636 result = address_space_write_continue(as, addr, attrs, buf, len,
2637 addr1, l, mr);
2638 rcu_read_unlock();
2639 }
2640
2641 return result;
2642}
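/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): a 32-bit guest-physical write through this path, with the
 * transaction result checked.  The address is a made-up example value.
 *
 *     uint8_t buf[4];
 *     MemTxResult r;
 *
 *     stl_p(buf, 0xdeadbeef);
 *     r = address_space_write(&address_space_memory, 0x10000,
 *                             MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
 *     if (r != MEMTX_OK) {
 *         // the target region reported a decode or device error
 *     }
 *
 * For RAM-backed addresses this takes the memcpy branch above; for MMIO it
 * is dispatched through memory_region_dispatch_write().
 */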
2643
2644/* Called within RCU critical section. */
2645MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2646 MemTxAttrs attrs, uint8_t *buf,
2647 int len, hwaddr addr1, hwaddr l,
2648 MemoryRegion *mr)
2649{
2650 uint8_t *ptr;
2651 uint64_t val;
2652 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002653 bool release_lock = false;
2654
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002655 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002656 if (!memory_access_is_direct(mr, false)) {
2657 /* I/O case */
2658 release_lock |= prepare_mmio_access(mr);
2659 l = memory_access_size(mr, l, addr1);
2660 switch (l) {
2661 case 8:
2662 /* 64 bit read access */
2663 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2664 attrs);
2665 stq_p(buf, val);
2666 break;
2667 case 4:
2668 /* 32 bit read access */
2669 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2670 attrs);
2671 stl_p(buf, val);
2672 break;
2673 case 2:
2674 /* 16 bit read access */
2675 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2676 attrs);
2677 stw_p(buf, val);
2678 break;
2679 case 1:
2680 /* 8 bit read access */
2681 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2682 attrs);
2683 stb_p(buf, val);
2684 break;
2685 default:
2686 abort();
2687 }
2688 } else {
2689 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002690 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002691 memcpy(buf, ptr, l);
2692 }
2693
2694 if (release_lock) {
2695 qemu_mutex_unlock_iothread();
2696 release_lock = false;
2697 }
2698
2699 len -= l;
2700 buf += l;
2701 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002702
2703 if (!len) {
2704 break;
2705 }
2706
2707 l = len;
2708 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002709 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002710
2711 return result;
2712}
2713
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002714MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2715 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002716{
2717 hwaddr l;
2718 hwaddr addr1;
2719 MemoryRegion *mr;
2720 MemTxResult result = MEMTX_OK;
2721
2722 if (len > 0) {
2723 rcu_read_lock();
2724 l = len;
2725 mr = address_space_translate(as, addr, &addr1, &l, false);
2726 result = address_space_read_continue(as, addr, attrs, buf, len,
2727 addr1, l, mr);
2728 rcu_read_unlock();
2729 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002730
2731 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002732}
2733
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002734MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2735 uint8_t *buf, int len, bool is_write)
2736{
2737 if (is_write) {
2738 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2739 } else {
2740 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2741 }
2742}
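/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): the matching read direction.  address_space_read() takes the same
 * arguments as address_space_write() and fills the caller's buffer; the
 * address below is a made-up example value.
 *
 *     uint8_t buf[16];
 *     MemTxResult r;
 *
 *     r = address_space_read(&address_space_memory, 0x10000,
 *                            MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
 *     if (r == MEMTX_OK) {
 *         // buf now holds 16 bytes read from guest-physical memory
 *     }
 *
 * address_space_rw() is a convenience wrapper that picks one of the two
 * directions based on is_write.
 */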
Avi Kivityac1970f2012-10-03 16:22:53 +02002743
Avi Kivitya8170e52012-10-23 12:30:10 +02002744void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002745 int len, int is_write)
2746{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002747 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2748 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002749}
2750
Alexander Graf582b55a2013-12-11 14:17:44 +01002751enum write_rom_type {
2752 WRITE_DATA,
2753 FLUSH_CACHE,
2754};
2755
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002756static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002757 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002758{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002759 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002760 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002761 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002762 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002763
Paolo Bonzini41063e12015-03-18 14:21:43 +01002764 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002765 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002766 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002767 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002768
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002769 if (!(memory_region_is_ram(mr) ||
2770 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002771 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002772 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002773 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002774 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002775 switch (type) {
2776 case WRITE_DATA:
2777 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002778 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002779 break;
2780 case FLUSH_CACHE:
2781 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2782 break;
2783 }
bellardd0ecd2a2006-04-23 17:14:48 +00002784 }
2785 len -= l;
2786 buf += l;
2787 addr += l;
2788 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002789 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002790}
2791
Alexander Graf582b55a2013-12-11 14:17:44 +01002792/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002793void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002794 const uint8_t *buf, int len)
2795{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002796 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002797}
2798
2799void cpu_flush_icache_range(hwaddr start, int len)
2800{
2801 /*
2802 * This function should do the same thing as an icache flush that was
2803 * triggered from within the guest. For TCG we are always cache coherent,
2804 * so there is no need to flush anything. For KVM / Xen we need to flush
2805 * the host's instruction cache at least.
2806 */
2807 if (tcg_enabled()) {
2808 return;
2809 }
2810
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002811 cpu_physical_memory_write_rom_internal(&address_space_memory,
2812 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002813}
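/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): a typical ROM-loading sequence as described above.  Write the
 * image through the ROM-tolerant path, then make sure the host icache is
 * coherent for accelerators that need it; "rom_base", "image" and
 * "image_size" are hypothetical names.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, rom_base,
 *                                   image, image_size);
 *     cpu_flush_icache_range(rom_base, image_size);
 *
 * Under TCG the flush is a no-op, as the comment above explains.
 */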
2814
aliguori6d16c2f2009-01-22 16:59:11 +00002815typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002816 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002817 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002818 hwaddr addr;
2819 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002820 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002821} BounceBuffer;
2822
2823static BounceBuffer bounce;
2824
aliguoriba223c22009-01-22 16:59:16 +00002825typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002826 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002827 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002828} MapClient;
2829
Fam Zheng38e047b2015-03-16 17:03:35 +08002830QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002831static QLIST_HEAD(map_client_list, MapClient) map_client_list
2832 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002833
Fam Zhenge95205e2015-03-16 17:03:37 +08002834static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002835{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002836 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002837 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002838}
2839
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002840static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002841{
2842 MapClient *client;
2843
Blue Swirl72cf2d42009-09-12 07:36:22 +00002844 while (!QLIST_EMPTY(&map_client_list)) {
2845 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002846 qemu_bh_schedule(client->bh);
2847 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002848 }
2849}
2850
Fam Zhenge95205e2015-03-16 17:03:37 +08002851void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002852{
2853 MapClient *client = g_malloc(sizeof(*client));
2854
Fam Zheng38e047b2015-03-16 17:03:35 +08002855 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002856 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002857 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002858 if (!atomic_read(&bounce.in_use)) {
2859 cpu_notify_map_clients_locked();
2860 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002861 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002862}
2863
Fam Zheng38e047b2015-03-16 17:03:35 +08002864void cpu_exec_init_all(void)
2865{
2866 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002867 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002868 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002869 qemu_mutex_init(&map_client_list_lock);
2870}
2871
Fam Zhenge95205e2015-03-16 17:03:37 +08002872void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002873{
Fam Zhenge95205e2015-03-16 17:03:37 +08002874 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002875
Fam Zhenge95205e2015-03-16 17:03:37 +08002876 qemu_mutex_lock(&map_client_list_lock);
2877 QLIST_FOREACH(client, &map_client_list, link) {
2878 if (client->bh == bh) {
2879 cpu_unregister_map_client_do(client);
2880 break;
2881 }
2882 }
2883 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002884}
2885
2886static void cpu_notify_map_clients(void)
2887{
Fam Zheng38e047b2015-03-16 17:03:35 +08002888 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002889 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002890 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002891}
2892
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002893bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2894{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002895 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002896 hwaddr l, xlat;
2897
Paolo Bonzini41063e12015-03-18 14:21:43 +01002898 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002899 while (len > 0) {
2900 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002901 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2902 if (!memory_access_is_direct(mr, is_write)) {
2903 l = memory_access_size(mr, l, addr);
2904 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002905 return false;
2906 }
2907 }
2908
2909 len -= l;
2910 addr += l;
2911 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002912 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002913 return true;
2914}
2915
aliguori6d16c2f2009-01-22 16:59:11 +00002916/* Map a physical memory region into a host virtual address.
2917 * May map a subset of the requested range, given by and returned in *plen.
2918 * May return NULL if resources needed to perform the mapping are exhausted.
2919 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002920 * Use cpu_register_map_client() to know when retrying the map operation is
2921 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002922 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002923void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002924 hwaddr addr,
2925 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002926 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002927{
Avi Kivitya8170e52012-10-23 12:30:10 +02002928 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002929 hwaddr done = 0;
2930 hwaddr l, xlat, base;
2931 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002932 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002933
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002934 if (len == 0) {
2935 return NULL;
2936 }
aliguori6d16c2f2009-01-22 16:59:11 +00002937
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002938 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002939 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002940 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002941
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002942 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002943 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002944 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002945 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002946 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002947 /* Avoid unbounded allocations */
2948 l = MIN(l, TARGET_PAGE_SIZE);
2949 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002950 bounce.addr = addr;
2951 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002952
2953 memory_region_ref(mr);
2954 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002955 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002956 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2957 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002958 }
aliguori6d16c2f2009-01-22 16:59:11 +00002959
Paolo Bonzini41063e12015-03-18 14:21:43 +01002960 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002961 *plen = l;
2962 return bounce.buffer;
2963 }
2964
2965 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002966
2967 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002968 len -= l;
2969 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002970 done += l;
2971 if (len == 0) {
2972 break;
2973 }
2974
2975 l = len;
2976 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2977 if (this_mr != mr || xlat != base + done) {
2978 break;
2979 }
aliguori6d16c2f2009-01-22 16:59:11 +00002980 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002981
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002982 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002983 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002984 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002985 rcu_read_unlock();
2986
2987 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002988}
2989
Avi Kivityac1970f2012-10-03 16:22:53 +02002990/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002991 * Will also mark the memory as dirty if is_write == 1. access_len gives
2992 * the amount of memory that was actually read or written by the caller.
2993 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002994void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2995 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002996{
2997 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002998 MemoryRegion *mr;
2999 ram_addr_t addr1;
3000
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01003001 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003002 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003003 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003004 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003005 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003006 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003007 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003008 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003009 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003010 return;
3011 }
3012 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003013 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3014 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003015 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003016 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003017 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003018 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003019 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003020 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003021}
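/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): the map/unmap pattern documented above, as a device model might
 * use it for zero-copy DMA.  "dma_addr", "dma_len" and "retry_bh" are
 * hypothetical; retry_bh would be a QEMUBH that restarts the transfer.
 *
 *     hwaddr plen = dma_len;
 *     void *p = address_space_map(&address_space_memory, dma_addr,
 *                                 &plen, true);
 *     if (!p) {
 *         // bounce buffer busy: ask to be notified when a retry may succeed
 *         cpu_register_map_client(retry_bh);
 *         return;
 *     }
 *     // ... fill p with up to plen bytes (may be less than dma_len) ...
 *     address_space_unmap(&address_space_memory, p, plen, true, plen);
 *
 * Only the first plen bytes are guaranteed to be mapped, so callers must
 * loop if they need the full range.
 */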
bellardd0ecd2a2006-04-23 17:14:48 +00003022
Avi Kivitya8170e52012-10-23 12:30:10 +02003023void *cpu_physical_memory_map(hwaddr addr,
3024 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003025 int is_write)
3026{
3027 return address_space_map(&address_space_memory, addr, plen, is_write);
3028}
3029
Avi Kivitya8170e52012-10-23 12:30:10 +02003030void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3031 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003032{
3033 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3034}
3035
bellard8df1cd02005-01-28 22:37:22 +00003036/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003037static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3038 MemTxAttrs attrs,
3039 MemTxResult *result,
3040 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003041{
bellard8df1cd02005-01-28 22:37:22 +00003042 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003043 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003044 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003045 hwaddr l = 4;
3046 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003047 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003048 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003049
Paolo Bonzini41063e12015-03-18 14:21:43 +01003050 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003051 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003052 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003053 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003054
bellard8df1cd02005-01-28 22:37:22 +00003055 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003056 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003057#if defined(TARGET_WORDS_BIGENDIAN)
3058 if (endian == DEVICE_LITTLE_ENDIAN) {
3059 val = bswap32(val);
3060 }
3061#else
3062 if (endian == DEVICE_BIG_ENDIAN) {
3063 val = bswap32(val);
3064 }
3065#endif
bellard8df1cd02005-01-28 22:37:22 +00003066 } else {
3067 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003068 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003069 switch (endian) {
3070 case DEVICE_LITTLE_ENDIAN:
3071 val = ldl_le_p(ptr);
3072 break;
3073 case DEVICE_BIG_ENDIAN:
3074 val = ldl_be_p(ptr);
3075 break;
3076 default:
3077 val = ldl_p(ptr);
3078 break;
3079 }
Peter Maydell50013112015-04-26 16:49:24 +01003080 r = MEMTX_OK;
3081 }
3082 if (result) {
3083 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003084 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003085 if (release_lock) {
3086 qemu_mutex_unlock_iothread();
3087 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003088 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003089 return val;
3090}
3091
Peter Maydell50013112015-04-26 16:49:24 +01003092uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3093 MemTxAttrs attrs, MemTxResult *result)
3094{
3095 return address_space_ldl_internal(as, addr, attrs, result,
3096 DEVICE_NATIVE_ENDIAN);
3097}
3098
3099uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3100 MemTxAttrs attrs, MemTxResult *result)
3101{
3102 return address_space_ldl_internal(as, addr, attrs, result,
3103 DEVICE_LITTLE_ENDIAN);
3104}
3105
3106uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3107 MemTxAttrs attrs, MemTxResult *result)
3108{
3109 return address_space_ldl_internal(as, addr, attrs, result,
3110 DEVICE_BIG_ENDIAN);
3111}
3112
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003113uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003114{
Peter Maydell50013112015-04-26 16:49:24 +01003115 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003116}
3117
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003118uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003119{
Peter Maydell50013112015-04-26 16:49:24 +01003120 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003121}
3122
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003123uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003124{
Peter Maydell50013112015-04-26 16:49:24 +01003125 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003126}
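/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): reading a 32-bit little-endian value with an explicit transaction
 * result instead of the NULL passed by the ldl_phys() wrappers.  The
 * address is a made-up example value.
 *
 *     MemTxResult r;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, 0x10000,
 *                                       MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         // the load failed; v should not be trusted
 *     }
 *
 * The _le/_be variants byte-swap as needed so that v is always returned in
 * host byte order.
 */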
3127
bellard84b7b8e2005-11-28 21:19:04 +00003128/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003129static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3130 MemTxAttrs attrs,
3131 MemTxResult *result,
3132 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003133{
bellard84b7b8e2005-11-28 21:19:04 +00003134 uint8_t *ptr;
3135 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003136 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003137 hwaddr l = 8;
3138 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003139 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003140 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003141
Paolo Bonzini41063e12015-03-18 14:21:43 +01003142 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003143 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003144 false);
3145 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003146 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003147
bellard84b7b8e2005-11-28 21:19:04 +00003148 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003149 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003150#if defined(TARGET_WORDS_BIGENDIAN)
3151 if (endian == DEVICE_LITTLE_ENDIAN) {
3152 val = bswap64(val);
3153 }
3154#else
3155 if (endian == DEVICE_BIG_ENDIAN) {
3156 val = bswap64(val);
3157 }
3158#endif
bellard84b7b8e2005-11-28 21:19:04 +00003159 } else {
3160 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003161 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003162 switch (endian) {
3163 case DEVICE_LITTLE_ENDIAN:
3164 val = ldq_le_p(ptr);
3165 break;
3166 case DEVICE_BIG_ENDIAN:
3167 val = ldq_be_p(ptr);
3168 break;
3169 default:
3170 val = ldq_p(ptr);
3171 break;
3172 }
Peter Maydell50013112015-04-26 16:49:24 +01003173 r = MEMTX_OK;
3174 }
3175 if (result) {
3176 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003177 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003178 if (release_lock) {
3179 qemu_mutex_unlock_iothread();
3180 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003181 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003182 return val;
3183}
3184
Peter Maydell50013112015-04-26 16:49:24 +01003185uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3186 MemTxAttrs attrs, MemTxResult *result)
3187{
3188 return address_space_ldq_internal(as, addr, attrs, result,
3189 DEVICE_NATIVE_ENDIAN);
3190}
3191
3192uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3193 MemTxAttrs attrs, MemTxResult *result)
3194{
3195 return address_space_ldq_internal(as, addr, attrs, result,
3196 DEVICE_LITTLE_ENDIAN);
3197}
3198
3199uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3200 MemTxAttrs attrs, MemTxResult *result)
3201{
3202 return address_space_ldq_internal(as, addr, attrs, result,
3203 DEVICE_BIG_ENDIAN);
3204}
3205
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003206uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003207{
Peter Maydell50013112015-04-26 16:49:24 +01003208 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003209}
3210
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003211uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003212{
Peter Maydell50013112015-04-26 16:49:24 +01003213 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003214}
3215
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003216uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003217{
Peter Maydell50013112015-04-26 16:49:24 +01003218 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003219}
3220
bellardaab33092005-10-30 20:48:42 +00003221/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003222uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3223 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003224{
3225 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003226 MemTxResult r;
3227
3228 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3229 if (result) {
3230 *result = r;
3231 }
bellardaab33092005-10-30 20:48:42 +00003232 return val;
3233}
3234
Peter Maydell50013112015-04-26 16:49:24 +01003235uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3236{
3237 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3238}
3239
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003240/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003241static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3242 hwaddr addr,
3243 MemTxAttrs attrs,
3244 MemTxResult *result,
3245 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003246{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003247 uint8_t *ptr;
3248 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003249 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003250 hwaddr l = 2;
3251 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003252 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003253 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003254
Paolo Bonzini41063e12015-03-18 14:21:43 +01003255 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003256 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003257 false);
3258 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003259 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003260
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003261 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003262 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003263#if defined(TARGET_WORDS_BIGENDIAN)
3264 if (endian == DEVICE_LITTLE_ENDIAN) {
3265 val = bswap16(val);
3266 }
3267#else
3268 if (endian == DEVICE_BIG_ENDIAN) {
3269 val = bswap16(val);
3270 }
3271#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003272 } else {
3273 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003274 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003275 switch (endian) {
3276 case DEVICE_LITTLE_ENDIAN:
3277 val = lduw_le_p(ptr);
3278 break;
3279 case DEVICE_BIG_ENDIAN:
3280 val = lduw_be_p(ptr);
3281 break;
3282 default:
3283 val = lduw_p(ptr);
3284 break;
3285 }
Peter Maydell50013112015-04-26 16:49:24 +01003286 r = MEMTX_OK;
3287 }
3288 if (result) {
3289 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003290 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003291 if (release_lock) {
3292 qemu_mutex_unlock_iothread();
3293 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003294 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003295 return val;
bellardaab33092005-10-30 20:48:42 +00003296}
3297
Peter Maydell50013112015-04-26 16:49:24 +01003298uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3299 MemTxAttrs attrs, MemTxResult *result)
3300{
3301 return address_space_lduw_internal(as, addr, attrs, result,
3302 DEVICE_NATIVE_ENDIAN);
3303}
3304
3305uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3306 MemTxAttrs attrs, MemTxResult *result)
3307{
3308 return address_space_lduw_internal(as, addr, attrs, result,
3309 DEVICE_LITTLE_ENDIAN);
3310}
3311
3312uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3313 MemTxAttrs attrs, MemTxResult *result)
3314{
3315 return address_space_lduw_internal(as, addr, attrs, result,
3316 DEVICE_BIG_ENDIAN);
3317}
3318
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003319uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003320{
Peter Maydell50013112015-04-26 16:49:24 +01003321 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003322}
3323
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003324uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003325{
Peter Maydell50013112015-04-26 16:49:24 +01003326 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003327}
3328
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003329uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003330{
Peter Maydell50013112015-04-26 16:49:24 +01003331 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003332}
3333
bellard8df1cd02005-01-28 22:37:22 +00003334/* warning: addr must be aligned. The RAM page is not marked as dirty
3335 and the code inside is not invalidated. It is useful if the dirty
3336 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003337void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3338 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003339{
bellard8df1cd02005-01-28 22:37:22 +00003340 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003341 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003342 hwaddr l = 4;
3343 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003344 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003345 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003346 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003347
Paolo Bonzini41063e12015-03-18 14:21:43 +01003348 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003349 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003350 true);
3351 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003352 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003353
Peter Maydell50013112015-04-26 16:49:24 +01003354 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003355 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003356 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003357 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003358
Paolo Bonzini845b6212015-03-23 11:45:53 +01003359 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3360 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003361 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3362 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003363 r = MEMTX_OK;
3364 }
3365 if (result) {
3366 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003367 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003368 if (release_lock) {
3369 qemu_mutex_unlock_iothread();
3370 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003371 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003372}
3373
Peter Maydell50013112015-04-26 16:49:24 +01003374void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3375{
3376 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3377}
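/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): the PTE-update case mentioned in the comment above.  A hypothetical
 * page-table walker that sets an accessed/dirty bit in a guest PTE can use
 * the _notdirty variant so the store neither marks the page dirty nor
 * invalidates TBs translated from it; "pte_addr" and "new_pte" are made-up
 * names.
 *
 *     stl_phys_notdirty(&address_space_memory, pte_addr, new_pte);
 *
 * An ordinary stl_phys() at the same address would go through
 * invalidate_and_set_dirty() instead.
 */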
3378
bellard8df1cd02005-01-28 22:37:22 +00003379/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003380static inline void address_space_stl_internal(AddressSpace *as,
3381 hwaddr addr, uint32_t val,
3382 MemTxAttrs attrs,
3383 MemTxResult *result,
3384 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003385{
bellard8df1cd02005-01-28 22:37:22 +00003386 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003387 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003388 hwaddr l = 4;
3389 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003390 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003391 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003392
Paolo Bonzini41063e12015-03-18 14:21:43 +01003393 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003394 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003395 true);
3396 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003397 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003398
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003399#if defined(TARGET_WORDS_BIGENDIAN)
3400 if (endian == DEVICE_LITTLE_ENDIAN) {
3401 val = bswap32(val);
3402 }
3403#else
3404 if (endian == DEVICE_BIG_ENDIAN) {
3405 val = bswap32(val);
3406 }
3407#endif
Peter Maydell50013112015-04-26 16:49:24 +01003408 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003409 } else {
bellard8df1cd02005-01-28 22:37:22 +00003410 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003411 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003412 switch (endian) {
3413 case DEVICE_LITTLE_ENDIAN:
3414 stl_le_p(ptr, val);
3415 break;
3416 case DEVICE_BIG_ENDIAN:
3417 stl_be_p(ptr, val);
3418 break;
3419 default:
3420 stl_p(ptr, val);
3421 break;
3422 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003423 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003424 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003425 }
Peter Maydell50013112015-04-26 16:49:24 +01003426 if (result) {
3427 *result = r;
3428 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003429 if (release_lock) {
3430 qemu_mutex_unlock_iothread();
3431 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003432 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003433}
3434
3435void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3436 MemTxAttrs attrs, MemTxResult *result)
3437{
3438 address_space_stl_internal(as, addr, val, attrs, result,
3439 DEVICE_NATIVE_ENDIAN);
3440}
3441
3442void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3443 MemTxAttrs attrs, MemTxResult *result)
3444{
3445 address_space_stl_internal(as, addr, val, attrs, result,
3446 DEVICE_LITTLE_ENDIAN);
3447}
3448
3449void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3450 MemTxAttrs attrs, MemTxResult *result)
3451{
3452 address_space_stl_internal(as, addr, val, attrs, result,
3453 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003454}
3455
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003456void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003457{
Peter Maydell50013112015-04-26 16:49:24 +01003458 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003459}
3460
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003461void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003462{
Peter Maydell50013112015-04-26 16:49:24 +01003463 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003464}
3465
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003466void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003467{
Peter Maydell50013112015-04-26 16:49:24 +01003468 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003469}
3470
bellardaab33092005-10-30 20:48:42 +00003471/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003472void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3473 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003474{
3475 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003476 MemTxResult r;
3477
3478 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3479 if (result) {
3480 *result = r;
3481 }
3482}
3483
3484void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3485{
3486 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003487}
3488
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003489/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003490static inline void address_space_stw_internal(AddressSpace *as,
3491 hwaddr addr, uint32_t val,
3492 MemTxAttrs attrs,
3493 MemTxResult *result,
3494 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003495{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003496 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003497 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003498 hwaddr l = 2;
3499 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003500 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003501 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003502
Paolo Bonzini41063e12015-03-18 14:21:43 +01003503 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003504 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003505 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003506 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003507
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
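        /* Direct RAM write: mark the range dirty so migration and
         * code-invalidation bookkeeping see the update. */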
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

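/* 16-bit variants of the stl_*_phys wrappers above: unspecified attributes,
 * transaction result ignored. */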
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

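/*
 * The 64-bit stores are not open-coded like the 16/32-bit helpers: the
 * value is converted to the requested byte order (target-native, little-
 * or big-endian) and then written as a raw 8-byte buffer through
 * address_space_rw().
 */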
/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

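/* 64-bit variants of the same convenience wrappers. */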
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
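/*
 * Walks the guest virtual address space one page at a time: each page is
 * translated with cpu_get_phys_page_attrs_debug() and then read or written
 * through the address space selected by the returned memory attributes.
 */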
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
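/*
 * Return true if the guest physical address is backed by MMIO rather than
 * RAM or a ROM device; the translation is done under the RCU read lock to
 * keep the memory map snapshot valid.
 */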
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

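/*
 * Call func for every RAMBlock, under the RCU read lock, stopping at the
 * first callback that returns a non-zero value (which is then returned).
 */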
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif