/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

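/* Number of radix-tree levels needed to cover the address space; for
 * example, with 4 KiB target pages (TARGET_PAGE_BITS == 12) this is
 * ((64 - 12 - 1) / 9) + 1 = 6 levels of 9 bits each.
 */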
#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

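/* Fill the radix tree with @leaf for *nb pages starting at *index.  Ranges
 * that cover a whole sub-tree at this level become leaves here; partially
 * covered entries recurse one level down.
 */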
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

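/* Compact the whole radix tree, starting from the root entry. */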
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

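/* Look up the section covering @addr by walking the radix tree, consuming
 * lp.skip levels at each step; unmapped addresses resolve to the
 * PHYS_SECTION_UNASSIGNED section.
 */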
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

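/* Look up the MemoryRegionSection for @addr, trying the cached mru_section
 * first and falling back to a radix-tree walk; optionally resolve subpage
 * regions down to the per-subpage section.
 */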
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
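/* Wire up address space @as as index @asidx of @cpu's CPU address spaces,
 * registering the TCG commit listener when TCG is in use.
 */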
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
    return;
}
#endif

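/* Undo cpu_exec_init: remove @cpu from the global CPU list, release its
 * cpu_index and unregister any vmstate that was registered for it.
 */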
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

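/* Atomically test and clear the dirty bits for [start, start + length) in
 * the given client's bitmap, one DIRTY_MEMORY_BLOCK_SIZE chunk at a time.
 * Returns true if any page in the range was dirty; for TCG the matching
 * TLB entries are reset as well.
 */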
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

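/* Compute the iotlb value for a TLB entry: for RAM this is the page's
 * ram_addr ORed with a special section index (NOTDIRTY or ROM), for MMIO it
 * is the section's index in the dispatch map plus the offset; pages covered
 * by a watchpoint are redirected to the watchpoint section instead.
 */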
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001167static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001168{
1169 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001170 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001171 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001172 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001173 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001174 MemoryRegionSection subsection = {
1175 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001176 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001177 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001178 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001179
Avi Kivityf3705d52012-03-08 16:16:34 +02001180 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001181
Avi Kivityf3705d52012-03-08 16:16:34 +02001182 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001183 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001184 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001185 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001186 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001187 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001188 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001189 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001190 }
1191 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001192 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001193 subpage_register(subpage, start, end,
1194 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001195}
1196
1197
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001198static void register_multipage(AddressSpaceDispatch *d,
1199 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001200{
Avi Kivitya8170e52012-10-23 12:30:10 +02001201 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001202 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001203 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1204 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001205
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001206 assert(num_pages);
1207 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001208}
1209
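/* mem_add() registers a MemoryRegionSection in the next dispatch map.
 * Sections are split on target page boundaries: an unaligned head and any
 * sub-page tail go through register_subpage(), while the page-aligned
 * middle is handled in one go by register_multipage(). The loop keeps
 * carving "now" off the front of "remain" until the section is consumed.
 */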
Avi Kivityac1970f2012-10-03 16:22:53 +02001210static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001211{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001212 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001213 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001214 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001215 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001216
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001217 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1218 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1219 - now.offset_within_address_space;
1220
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001221 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001222 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001223 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001224 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001225 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001226 while (int128_ne(remain.size, now.size)) {
1227 remain.size = int128_sub(remain.size, now.size);
1228 remain.offset_within_address_space += int128_get64(now.size);
1229 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001230 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001231 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001232 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001233 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001234 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001235 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001236 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001237 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001238 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001239 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001240 }
1241}
1242
Sheng Yang62a27442010-01-26 19:21:16 +08001243void qemu_flush_coalesced_mmio_buffer(void)
1244{
1245 if (kvm_enabled())
1246 kvm_flush_coalesced_mmio_buffer();
1247}
1248
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001249void qemu_mutex_lock_ramlist(void)
1250{
1251 qemu_mutex_lock(&ram_list.mutex);
1252}
1253
1254void qemu_mutex_unlock_ramlist(void)
1255{
1256 qemu_mutex_unlock(&ram_list.mutex);
1257}
1258
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001259#ifdef __linux__
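/* Back a RAM block with a file, typically on hugetlbfs. @path may name an
 * existing file, a file to create, or a directory in which a temporary
 * file is created and immediately unlinked. The file is grown with
 * ftruncate() and mapped with qemu_ram_mmap(), aligned to at least the
 * page size of the hosting filesystem.
 */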
Alex Williamson04b16652010-07-02 11:13:17 -06001260static void *file_ram_alloc(RAMBlock *block,
1261 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001262 const char *path,
1263 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001264{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001265 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001266 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001267 char *sanitized_name;
1268 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001269 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001270 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001271 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001272
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001273 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1274 error_setg(errp,
1275 "host lacks kvm mmu notifiers, -mem-path unsupported");
1276 return NULL;
1277 }
1278
1279 for (;;) {
1280 fd = open(path, O_RDWR);
1281 if (fd >= 0) {
1282 /* @path names an existing file, use it */
1283 break;
1284 }
1285 if (errno == ENOENT) {
1286 /* @path names a file that doesn't exist, create it */
1287 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1288 if (fd >= 0) {
1289 unlink_on_error = true;
1290 break;
1291 }
1292 } else if (errno == EISDIR) {
1293 /* @path names a directory, create a file there */
1294 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1295 sanitized_name = g_strdup(memory_region_name(block->mr));
1296 for (c = sanitized_name; *c != '\0'; c++) {
1297 if (*c == '/') {
1298 *c = '_';
1299 }
1300 }
1301
1302 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1303 sanitized_name);
1304 g_free(sanitized_name);
1305
1306 fd = mkstemp(filename);
1307 if (fd >= 0) {
1308 unlink(filename);
1309 g_free(filename);
1310 break;
1311 }
1312 g_free(filename);
1313 }
1314 if (errno != EEXIST && errno != EINTR) {
1315 error_setg_errno(errp, errno,
1316 "can't open backing store %s for guest RAM",
1317 path);
1318 goto error;
1319 }
1320 /*
1321 * Try again on EINTR and EEXIST. The latter happens when
1322 * something else creates the file between our two open().
1323 */
1324 }
1325
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001326 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001327 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001328
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001329 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001330 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001331 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001332 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001333 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001334 }
1335
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001336 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001337
1338 /*
1339 * ftruncate is not supported by hugetlbfs in older
1340 * hosts, so don't bother bailing out on errors.
1341 * If anything goes wrong with it under other filesystems,
1342 * mmap will fail.
1343 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001344 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001345 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001346 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001347
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001348 area = qemu_ram_mmap(fd, memory, block->mr->align,
1349 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001350 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001351 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001352 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001353 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001354 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001355
1356 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001357 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001358 }
1359
Alex Williamson04b16652010-07-02 11:13:17 -06001360 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001361 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001362
1363error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001364 if (unlink_on_error) {
1365 unlink(path);
1366 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001367 if (fd != -1) {
1368 close(fd);
1369 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001370 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001371}
1372#endif
1373
Mike Day0dc3f442013-09-05 14:41:35 -04001374/* Called with the ramlist lock held. */
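/* Choose an offset for a new block by scanning the gaps between existing
 * blocks and picking the smallest gap that still fits @size (a best-fit
 * search); aborts if no gap is large enough.
 */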
Alex Williamsond17b5282010-06-25 11:08:38 -06001375static ram_addr_t find_ram_offset(ram_addr_t size)
1376{
Alex Williamson04b16652010-07-02 11:13:17 -06001377 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001378 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001379
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001380 assert(size != 0); /* it would hand out the same offset multiple times */
1381
Mike Day0dc3f442013-09-05 14:41:35 -04001382 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001383 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001384 }
Alex Williamson04b16652010-07-02 11:13:17 -06001385
Mike Day0dc3f442013-09-05 14:41:35 -04001386 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001387 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001388
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001389 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001390
Mike Day0dc3f442013-09-05 14:41:35 -04001391 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001392 if (next_block->offset >= end) {
1393 next = MIN(next, next_block->offset);
1394 }
1395 }
1396 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001397 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001398 mingap = next - end;
1399 }
1400 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001401
1402 if (offset == RAM_ADDR_MAX) {
1403 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1404 (uint64_t)size);
1405 abort();
1406 }
1407
Alex Williamson04b16652010-07-02 11:13:17 -06001408 return offset;
1409}
1410
Juan Quintela652d7ec2012-07-20 10:37:54 +02001411ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001412{
Alex Williamsond17b5282010-06-25 11:08:38 -06001413 RAMBlock *block;
1414 ram_addr_t last = 0;
1415
Mike Day0dc3f442013-09-05 14:41:35 -04001416 rcu_read_lock();
1417 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001418 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001419 }
Mike Day0dc3f442013-09-05 14:41:35 -04001420 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001421 return last;
1422}
1423
Jason Baronddb97f12012-08-02 15:44:16 -04001424static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1425{
1426 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001427
1428 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001429 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001430 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1431 if (ret) {
1432 perror("qemu_madvise");
1433 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1434 "but dump_guest_core=off specified\n");
1435 }
1436 }
1437}
1438
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001439const char *qemu_ram_get_idstr(RAMBlock *rb)
1440{
1441 return rb->idstr;
1442}
1443
Mike Dayae3a7042013-09-05 14:41:35 -04001444/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001445void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001446{
Gongleifa53a0e2016-05-10 10:04:59 +08001447 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001448
Avi Kivityc5705a72011-12-20 15:59:12 +02001449 assert(new_block);
1450 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001451
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001452 if (dev) {
1453 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001454 if (id) {
1455 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001456 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001457 }
1458 }
1459 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1460
Gongleiab0a9952016-05-10 10:05:00 +08001461 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001462 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001463 if (block != new_block &&
1464 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001465 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1466 new_block->idstr);
1467 abort();
1468 }
1469 }
Mike Day0dc3f442013-09-05 14:41:35 -04001470 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001471}
1472
Mike Dayae3a7042013-09-05 14:41:35 -04001473/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001474void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001475{
Mike Dayae3a7042013-09-05 14:41:35 -04001476 /* FIXME: arch_init.c assumes that this is not called throughout
1477 * migration. Ignore the problem since hot-unplug during migration
1478 * does not work anyway.
1479 */
Hu Tao20cfe882014-04-02 15:13:26 +08001480 if (block) {
1481 memset(block->idstr, 0, sizeof(block->idstr));
1482 }
1483}
1484
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001485static int memory_try_enable_merging(void *addr, size_t len)
1486{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001487 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001488 /* disabled by the user */
1489 return 0;
1490 }
1491
1492 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1493}
1494
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001495/* Only legal before guest might have detected the memory size: e.g. on
1496 * incoming migration, or right after reset.
1497 *
1498 * As memory core doesn't know how is memory accessed, it is up to
1499 * resize callback to update device state and/or add assertions to detect
1500 * misuse, if necessary.
1501 */
Gongleifa53a0e2016-05-10 10:04:59 +08001502int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001503{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001504 assert(block);
1505
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001506 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001507
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001508 if (block->used_length == newsize) {
1509 return 0;
1510 }
1511
1512 if (!(block->flags & RAM_RESIZEABLE)) {
1513 error_setg_errno(errp, EINVAL,
1514 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1515 " in != 0x" RAM_ADDR_FMT, block->idstr,
1516 newsize, block->used_length);
1517 return -EINVAL;
1518 }
1519
1520 if (block->max_length < newsize) {
1521 error_setg_errno(errp, EINVAL,
1522 "Length too large: %s: 0x" RAM_ADDR_FMT
1523 " > 0x" RAM_ADDR_FMT, block->idstr,
1524 newsize, block->max_length);
1525 return -EINVAL;
1526 }
1527
1528 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1529 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001530 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1531 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001532 memory_region_set_size(block->mr, newsize);
1533 if (block->resized) {
1534 block->resized(block->idstr, newsize, block->host);
1535 }
1536 return 0;
1537}
1538
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001539/* Called with ram_list.mutex held */
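/* Grow the per-client dirty bitmaps to cover new_ram_size pages. Readers
 * may still be walking the old DirtyMemoryBlocks under RCU, so a larger
 * array is built, the existing bitmap pointers are copied over, fresh
 * bitmaps are appended for the new range, and the old array is reclaimed
 * with g_free_rcu() once all readers are done.
 */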
1540static void dirty_memory_extend(ram_addr_t old_ram_size,
1541 ram_addr_t new_ram_size)
1542{
1543 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1544 DIRTY_MEMORY_BLOCK_SIZE);
1545 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1546 DIRTY_MEMORY_BLOCK_SIZE);
1547 int i;
1548
1549 /* Only need to extend if block count increased */
1550 if (new_num_blocks <= old_num_blocks) {
1551 return;
1552 }
1553
1554 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1555 DirtyMemoryBlocks *old_blocks;
1556 DirtyMemoryBlocks *new_blocks;
1557 int j;
1558
1559 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1560 new_blocks = g_malloc(sizeof(*new_blocks) +
1561 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1562
1563 if (old_num_blocks) {
1564 memcpy(new_blocks->blocks, old_blocks->blocks,
1565 old_num_blocks * sizeof(old_blocks->blocks[0]));
1566 }
1567
1568 for (j = old_num_blocks; j < new_num_blocks; j++) {
1569 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1570 }
1571
1572 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1573
1574 if (old_blocks) {
1575 g_free_rcu(old_blocks, rcu);
1576 }
1577 }
1578}
1579
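/* Common tail of all RAM allocation paths: pick an offset with
 * find_ram_offset(), allocate the host memory (via Xen or the
 * phys_mem_alloc hook) unless the caller already provided it, extend the
 * migration and dirty bitmaps if the address space grew, and insert the
 * block into ram_list, which is kept sorted from largest to smallest
 * max_length.
 */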
Fam Zheng528f46a2016-03-01 14:18:18 +08001580static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001581{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001582 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001583 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001584 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001585 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001586
1587 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001588
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001589 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001590 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001591
1592 if (!new_block->host) {
1593 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001594 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001595 new_block->mr, &err);
1596 if (err) {
1597 error_propagate(errp, err);
1598 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001599 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001600 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001601 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001602 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001603 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001604 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001605 error_setg_errno(errp, errno,
1606 "cannot set up guest memory '%s'",
1607 memory_region_name(new_block->mr));
1608 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001609 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001610 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001611 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001612 }
1613 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001614
Li Zhijiandd631692015-07-02 20:18:06 +08001615 new_ram_size = MAX(old_ram_size,
1616 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1617 if (new_ram_size > old_ram_size) {
1618 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001619 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001620 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001621 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1622 * QLIST (which has an RCU-friendly variant) does not have insertion at
1623 * tail, so save the last element in last_block.
1624 */
Mike Day0dc3f442013-09-05 14:41:35 -04001625 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001626 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001627 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001628 break;
1629 }
1630 }
1631 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001632 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001633 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001634 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001635 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001636 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001637 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001638 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001639
Mike Day0dc3f442013-09-05 14:41:35 -04001640 /* Write list before version */
1641 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001642 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001643 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001644
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001645 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001646 new_block->used_length,
1647 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001648
Paolo Bonzinia904c912015-01-21 16:18:35 +01001649 if (new_block->host) {
1650 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1651 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1652 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1653 if (kvm_enabled()) {
1654 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1655 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001656 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001657}
1658
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001659#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001660RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1661 bool share, const char *mem_path,
1662 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001663{
1664 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001665 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001666
1667 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001668 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001669 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001670 }
1671
1672 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1673 /*
1674 * file_ram_alloc() needs to allocate just like
1675 * phys_mem_alloc, but we haven't bothered to provide
1676 * a hook there.
1677 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001678 error_setg(errp,
1679 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001680 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001681 }
1682
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001683 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001684 new_block = g_malloc0(sizeof(*new_block));
1685 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001686 new_block->used_length = size;
1687 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001688 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001689 new_block->host = file_ram_alloc(new_block, size,
1690 mem_path, errp);
1691 if (!new_block->host) {
1692 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001693 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001694 }
1695
Fam Zheng528f46a2016-03-01 14:18:18 +08001696 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001697 if (local_err) {
1698 g_free(new_block);
1699 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001700 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001701 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001702 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001703}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001704#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001705
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001706static
Fam Zheng528f46a2016-03-01 14:18:18 +08001707RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1708 void (*resized)(const char*,
1709 uint64_t length,
1710 void *host),
1711 void *host, bool resizeable,
1712 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001713{
1714 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001715 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001716
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001717 size = HOST_PAGE_ALIGN(size);
1718 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001719 new_block = g_malloc0(sizeof(*new_block));
1720 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001721 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001722 new_block->used_length = size;
1723 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001724 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001725 new_block->fd = -1;
1726 new_block->host = host;
1727 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001728 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001729 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001730 if (resizeable) {
1731 new_block->flags |= RAM_RESIZEABLE;
1732 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001733 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001734 if (local_err) {
1735 g_free(new_block);
1736 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001737 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001738 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001739 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001740}
1741
Fam Zheng528f46a2016-03-01 14:18:18 +08001742RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001743 MemoryRegion *mr, Error **errp)
1744{
1745 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1746}
1747
Fam Zheng528f46a2016-03-01 14:18:18 +08001748RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001749{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001750 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1751}
1752
Fam Zheng528f46a2016-03-01 14:18:18 +08001753RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001754 void (*resized)(const char*,
1755 uint64_t length,
1756 void *host),
1757 MemoryRegion *mr, Error **errp)
1758{
1759 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001760}
bellarde9a1ab12007-02-08 23:08:38 +00001761
Paolo Bonzini43771532013-09-09 17:58:40 +02001762static void reclaim_ramblock(RAMBlock *block)
1763{
1764 if (block->flags & RAM_PREALLOC) {
1765 ;
1766 } else if (xen_enabled()) {
1767 xen_invalidate_map_cache_entry(block->host);
1768#ifndef _WIN32
1769 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001770 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001771 close(block->fd);
1772#endif
1773 } else {
1774 qemu_anon_ram_free(block->host, block->max_length);
1775 }
1776 g_free(block);
1777}
1778
Fam Zhengf1060c52016-03-01 14:18:22 +08001779void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001780{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001781 if (!block) {
1782 return;
1783 }
1784
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001785 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001786 QLIST_REMOVE_RCU(block, next);
1787 ram_list.mru_block = NULL;
1788 /* Write list before version */
1789 smp_wmb();
1790 ram_list.version++;
1791 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001792 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001793}
1794
Huang Yingcd19cfa2011-03-02 08:56:19 +01001795#ifndef _WIN32
1796void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1797{
1798 RAMBlock *block;
1799 ram_addr_t offset;
1800 int flags;
1801 void *area, *vaddr;
1802
Mike Day0dc3f442013-09-05 14:41:35 -04001803 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001804 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001805 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001806 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001807 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001808 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001809 } else if (xen_enabled()) {
1810 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001811 } else {
1812 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001813 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001814 flags |= (block->flags & RAM_SHARED ?
1815 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001816 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1817 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001818 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001819 /*
1820 * Remap needs to match alloc. Accelerators that
1821 * set phys_mem_alloc never remap. If they did,
1822 * we'd need a remap hook here.
1823 */
1824 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1825
Huang Yingcd19cfa2011-03-02 08:56:19 +01001826 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1827 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1828 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001829 }
1830 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001831 fprintf(stderr, "Could not remap addr: "
1832 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001833 length, addr);
1834 exit(1);
1835 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001836 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001837 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001838 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001839 }
1840 }
1841}
1842#endif /* !_WIN32 */
1843
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001844/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001845 * This should not be used for general purpose DMA. Use address_space_map
1846 * or address_space_rw instead. For local memory (e.g. video ram) that the
1847 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001848 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001849 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001850 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001851void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001852{
Gonglei3655cb92016-02-20 10:35:20 +08001853 RAMBlock *block = ram_block;
1854
1855 if (block == NULL) {
1856 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001857 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001858 }
Mike Dayae3a7042013-09-05 14:41:35 -04001859
1860 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001861 /* We need to check if the requested address is in the RAM
1862 * because we don't want to map the entire memory in QEMU.
1863 * In that case just map until the end of the page.
1864 */
1865 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001866 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001867 }
Mike Dayae3a7042013-09-05 14:41:35 -04001868
1869 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001870 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001871 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001872}
1873
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001874/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001875 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001876 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001877 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001878 */
Gonglei3655cb92016-02-20 10:35:20 +08001879static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1880 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001881{
Gonglei3655cb92016-02-20 10:35:20 +08001882 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001883 if (*size == 0) {
1884 return NULL;
1885 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001886
Gonglei3655cb92016-02-20 10:35:20 +08001887 if (block == NULL) {
1888 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001889 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001890 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001891 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001892
1893 if (xen_enabled() && block->host == NULL) {
1894 /* We need to check if the requested address is in the RAM
1895 * because we don't want to map the entire memory in QEMU.
1896 * In that case just map the requested area.
1897 */
1898 if (block->offset == 0) {
1899 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001900 }
1901
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001902 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001903 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001904
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001905 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001906}
1907
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001908/*
1909 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1910 * in that RAMBlock.
1911 *
1912 * ptr: Host pointer to look up
1913 * round_offset: If true round the result offset down to a page boundary
1914 * *offset: set to result offset within the RAMBlock
1915 * (the corresponding ram_addr is block->offset + *offset)
1916 *
1917 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001918 *
1919 * By the time this function returns, the returned pointer is not protected
1920 * by RCU anymore. If the caller is not within an RCU critical section and
1921 * does not hold the iothread lock, it must have other means of protecting the
1922 * pointer, such as a reference to the region that includes the incoming
1923 * ram_addr_t.
1924 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001925RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001926 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001927{
pbrook94a6b542009-04-11 17:15:54 +00001928 RAMBlock *block;
1929 uint8_t *host = ptr;
1930
Jan Kiszka868bb332011-06-21 22:59:09 +02001931 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001932 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001933 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001934 ram_addr = xen_ram_addr_from_mapcache(ptr);
1935 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001936 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001937 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001938 }
Mike Day0dc3f442013-09-05 14:41:35 -04001939 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001940 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001941 }
1942
Mike Day0dc3f442013-09-05 14:41:35 -04001943 rcu_read_lock();
1944 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001945 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001946 goto found;
1947 }
1948
Mike Day0dc3f442013-09-05 14:41:35 -04001949 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001950 /* This case happens when the block is not mapped. */
1951 if (block->host == NULL) {
1952 continue;
1953 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001954 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001955 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001956 }
pbrook94a6b542009-04-11 17:15:54 +00001957 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001958
Mike Day0dc3f442013-09-05 14:41:35 -04001959 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001960 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001961
1962found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001963 *offset = (host - block->host);
1964 if (round_offset) {
1965 *offset &= TARGET_PAGE_MASK;
1966 }
Mike Day0dc3f442013-09-05 14:41:35 -04001967 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001968 return block;
1969}
1970
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001971/*
1972 * Finds the named RAMBlock
1973 *
1974 * name: The name of RAMBlock to find
1975 *
1976 * Returns: RAMBlock (or NULL if not found)
1977 */
1978RAMBlock *qemu_ram_block_by_name(const char *name)
1979{
1980 RAMBlock *block;
1981
1982 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1983 if (!strcmp(name, block->idstr)) {
1984 return block;
1985 }
1986 }
1987
1988 return NULL;
1989}
1990
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001991/* Some of the softmmu routines need to translate from a host pointer
1992 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001993ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001994{
1995 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001996 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001997
Paolo Bonzinif615f392016-05-26 10:07:50 +02001998 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001999 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002000 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002001 }
2002
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002003 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002004}
Alex Williamsonf471a172010-06-11 11:11:42 -06002005
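/* Writes to pages that still contain translated code are routed here
 * rather than directly to RAM: the write first invalidates any TBs on the
 * page, is then applied to host memory, and the range is marked dirty for
 * the VGA and migration clients. Once the code has been flushed, the TLB
 * entry for the current access is marked dirty so later writes bypass
 * this slow path.
 */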
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002006/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002007static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002008 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002009{
Juan Quintela52159192013-10-08 12:44:04 +02002010 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002011 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002012 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002013 switch (size) {
2014 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002015 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002016 break;
2017 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002018 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002019 break;
2020 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002021 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002022 break;
2023 default:
2024 abort();
2025 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002026 /* Set both VGA and migration bits for simplicity and to remove
2027 * the notdirty callback faster.
2028 */
2029 cpu_physical_memory_set_dirty_range(ram_addr, size,
2030 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002031 /* we remove the notdirty callback only if the code has been
2032 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002033 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002034 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002035 }
bellard1ccde1c2004-02-06 19:46:14 +00002036}
2037
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002038static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2039 unsigned size, bool is_write)
2040{
2041 return is_write;
2042}
2043
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002044static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002045 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002046 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002047 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002048};
2049
pbrook0f459d12008-06-09 00:20:13 +00002050/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002051static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002052{
Andreas Färber93afead2013-08-26 03:41:01 +02002053 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002054 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002055 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002056 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002057 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002058 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002059 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002060
Andreas Färberff4700b2013-08-26 18:23:18 +02002061 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002062 /* We re-entered the check after replacing the TB. Now raise
2063 * the debug interrupt so that it will trigger after the
2064 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002065 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002066 return;
2067 }
Andreas Färber93afead2013-08-26 03:41:01 +02002068 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002069 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002070 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2071 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002072 if (flags == BP_MEM_READ) {
2073 wp->flags |= BP_WATCHPOINT_HIT_READ;
2074 } else {
2075 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2076 }
2077 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002078 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002079 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002080 if (wp->flags & BP_CPU &&
2081 !cc->debug_check_watchpoint(cpu, wp)) {
2082 wp->flags &= ~BP_WATCHPOINT_HIT;
2083 continue;
2084 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002085 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002086 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002087 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002088 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002089 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002090 } else {
2091 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002092 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002093 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002094 }
aliguori06d55cc2008-11-18 20:24:06 +00002095 }
aliguori6e140f22008-11-18 20:37:55 +00002096 } else {
2097 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002098 }
2099 }
2100}
2101
pbrook6658ffb2007-03-16 23:58:11 +00002102/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2103 so these check for a hit then pass through to the normal out-of-line
2104 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002105static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2106 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002107{
Peter Maydell66b9b432015-04-26 16:49:24 +01002108 MemTxResult res;
2109 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002110 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2111 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002112
Peter Maydell66b9b432015-04-26 16:49:24 +01002113 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002114 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002115 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002116 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002117 break;
2118 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002119 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002120 break;
2121 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002122 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002123 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002124 default: abort();
2125 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002126 *pdata = data;
2127 return res;
2128}
2129
2130static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2131 uint64_t val, unsigned size,
2132 MemTxAttrs attrs)
2133{
2134 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002135 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2136 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002137
2138 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2139 switch (size) {
2140 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002141 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002142 break;
2143 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002144 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002145 break;
2146 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002147 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002148 break;
2149 default: abort();
2150 }
2151 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002152}
2153
Avi Kivity1ec9b902012-01-02 12:47:48 +02002154static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002155 .read_with_attrs = watch_mem_read,
2156 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002157 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002158};
pbrook6658ffb2007-03-16 23:58:11 +00002159
Peter Maydellf25a49e2015-04-26 16:49:24 +01002160static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2161 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002162{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002163 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002164 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002165 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002166
blueswir1db7b5422007-05-26 17:36:03 +00002167#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002168 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002169 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002170#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002171 res = address_space_read(subpage->as, addr + subpage->base,
2172 attrs, buf, len);
2173 if (res) {
2174 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002175 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002176 switch (len) {
2177 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002178 *data = ldub_p(buf);
2179 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002180 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002181 *data = lduw_p(buf);
2182 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002183 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002184 *data = ldl_p(buf);
2185 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002186 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002187 *data = ldq_p(buf);
2188 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002189 default:
2190 abort();
2191 }
blueswir1db7b5422007-05-26 17:36:03 +00002192}
2193
Peter Maydellf25a49e2015-04-26 16:49:24 +01002194static MemTxResult subpage_write(void *opaque, hwaddr addr,
2195 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002196{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002197 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002198 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002199
blueswir1db7b5422007-05-26 17:36:03 +00002200#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002201 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002202 " value %"PRIx64"\n",
2203 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002204#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002205 switch (len) {
2206 case 1:
2207 stb_p(buf, value);
2208 break;
2209 case 2:
2210 stw_p(buf, value);
2211 break;
2212 case 4:
2213 stl_p(buf, value);
2214 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002215 case 8:
2216 stq_p(buf, value);
2217 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002218 default:
2219 abort();
2220 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002221 return address_space_write(subpage->as, addr + subpage->base,
2222 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002223}
2224
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002225static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002226 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002227{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002228 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002229#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002230 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002231 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002232#endif
2233
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002234 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002235 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002236}
2237
Avi Kivity70c68e42012-01-02 12:32:48 +02002238static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002239 .read_with_attrs = subpage_read,
2240 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002241 .impl.min_access_size = 1,
2242 .impl.max_access_size = 8,
2243 .valid.min_access_size = 1,
2244 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002245 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002246 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002247};
2248
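/* Subpages cover a single target page whose contents are shared by more
 * than one memory section. subpage_register() fills the sub_section[]
 * lookup table with a section index for the given byte range,
 * subpage_init() builds the container region, and subpage_ops forward
 * each access to the owning AddressSpace with address_space_read() and
 * address_space_write().
 */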
Anthony Liguoric227f092009-10-01 16:12:16 -05002249static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002250 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002251{
2252 int idx, eidx;
2253
2254 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2255 return -1;
2256 idx = SUBPAGE_IDX(start);
2257 eidx = SUBPAGE_IDX(end);
2258#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002259 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2260 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002261#endif
blueswir1db7b5422007-05-26 17:36:03 +00002262 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002263 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002264 }
2265
2266 return 0;
2267}
2268
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002269static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002270{
Anthony Liguoric227f092009-10-01 16:12:16 -05002271 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002272
Anthony Liguori7267c092011-08-20 22:09:37 -05002273 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002274
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002275 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002276 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002277 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002278 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002279 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002280#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002281 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2282 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002283#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002284 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002285
2286 return mmio;
2287}
2288
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002289static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2290 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002291{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002292 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002293 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002294 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002295 .mr = mr,
2296 .offset_within_address_space = 0,
2297 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002298 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002299 };
2300
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002301 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002302}
2303
Peter Maydella54c87b2016-01-21 14:15:05 +00002304MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002305{
Peter Maydella54c87b2016-01-21 14:15:05 +00002306 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2307 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002308 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002309 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002310
2311 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002312}
2313
Avi Kivitye9179ce2009-06-14 11:38:52 +03002314static void io_mem_init(void)
2315{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002316 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002317 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002318 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002319 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002320 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002321 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002322 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002323}
2324
Avi Kivityac1970f2012-10-03 16:22:53 +02002325static void mem_begin(MemoryListener *listener)
2326{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002327 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002328 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2329 uint16_t n;
2330
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002331 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002332 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002333 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002334 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002335 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002336 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002337 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002338 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002339
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002340 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002341 d->as = as;
2342 as->next_dispatch = d;
2343}
2344
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002345static void address_space_dispatch_free(AddressSpaceDispatch *d)
2346{
2347 phys_sections_free(&d->map);
2348 g_free(d);
2349}
2350
Paolo Bonzini00752702013-05-29 12:13:54 +02002351static void mem_commit(MemoryListener *listener)
2352{
2353 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002354 AddressSpaceDispatch *cur = as->dispatch;
2355 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002356
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002357 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002358
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002359 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002360 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002361 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002362 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002363}
2364
Avi Kivity1d711482012-10-02 18:54:45 +02002365static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002366{
Peter Maydell32857f42015-10-01 15:29:50 +01002367 CPUAddressSpace *cpuas;
2368 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002369
2370 /* since each CPU stores ram addresses in its TLB cache, we must
2371 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002372 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2373 cpu_reloading_memory_map();
2374 /* The CPU and TLB are protected by the iothread lock.
2375 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2376 * may have split the RCU critical section.
2377 */
2378 d = atomic_rcu_read(&cpuas->as->dispatch);
2379 cpuas->memory_dispatch = d;
2380 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002381}
2382
Avi Kivityac1970f2012-10-03 16:22:53 +02002383void address_space_init_dispatch(AddressSpace *as)
2384{
Paolo Bonzini00752702013-05-29 12:13:54 +02002385 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002386 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002387 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002388 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002389 .region_add = mem_add,
2390 .region_nop = mem_add,
2391 .priority = 0,
2392 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002393 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002394}
2395
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002396void address_space_unregister(AddressSpace *as)
2397{
2398 memory_listener_unregister(&as->dispatch_listener);
2399}
2400
Avi Kivity83f3c252012-10-07 12:59:55 +02002401void address_space_destroy_dispatch(AddressSpace *as)
2402{
2403 AddressSpaceDispatch *d = as->dispatch;
2404
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002405 atomic_rcu_set(&as->dispatch, NULL);
2406 if (d) {
2407 call_rcu(d, address_space_dispatch_free, rcu);
2408 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002409}
2410
Avi Kivity62152b82011-07-26 14:26:14 +03002411static void memory_map_init(void)
2412{
Anthony Liguori7267c092011-08-20 22:09:37 -05002413 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002414
Paolo Bonzini57271d62013-11-07 17:14:37 +01002415 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002416 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002417
Anthony Liguori7267c092011-08-20 22:09:37 -05002418 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002419 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2420 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002421 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002422}
2423
2424MemoryRegion *get_system_memory(void)
2425{
2426 return system_memory;
2427}
2428
Avi Kivity309cb472011-08-08 16:09:03 +03002429MemoryRegion *get_system_io(void)
2430{
2431 return system_io;
2432}
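/*
 * A minimal illustrative sketch (not part of the original file) of how
 * board code typically plugs RAM into the root region returned above.
 * The names "example.ram", EXAMPLE_RAM_BASE and EXAMPLE_RAM_SIZE are
 * placeholders, not real board constants.
 */
#define EXAMPLE_RAM_BASE 0x40000000
#define EXAMPLE_RAM_SIZE (64 * 1024 * 1024)

static void example_add_board_ram(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", EXAMPLE_RAM_SIZE,
                           &error_fatal);
    /* A real board would usually also register the region for migration. */
    memory_region_add_subregion(get_system_memory(), EXAMPLE_RAM_BASE, ram);
}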
2433
pbrooke2eef172008-06-08 01:09:01 +00002434#endif /* !defined(CONFIG_USER_ONLY) */
2435
bellard13eb76e2004-01-24 15:23:36 +00002436/* physical memory access (slow version, mainly for debug) */
2437#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002438int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002439 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002440{
2441 int l, flags;
2442 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002443 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002444
2445 while (len > 0) {
2446 page = addr & TARGET_PAGE_MASK;
2447 l = (page + TARGET_PAGE_SIZE) - addr;
2448 if (l > len)
2449 l = len;
2450 flags = page_get_flags(page);
2451 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002452 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002453 if (is_write) {
2454 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002455 return -1;
bellard579a97f2007-11-11 14:26:47 +00002456 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002457 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002458 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002459 memcpy(p, buf, l);
2460 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002461 } else {
2462 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002463 return -1;
bellard579a97f2007-11-11 14:26:47 +00002464 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002465 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002466 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002467 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002468 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002469 }
2470 len -= l;
2471 buf += l;
2472 addr += l;
2473 }
Paul Brooka68fe892010-03-01 00:08:59 +00002474 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002475}
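/*
 * A hedged usage sketch (illustrative only): how a debugger stub might
 * peek at guest memory through the slow-path accessor above. "cpu" is
 * assumed to be a valid CPUState (e.g. first_cpu); the helper name is
 * hypothetical.
 */
static bool example_debug_peek_u32(CPUState *cpu, target_ulong addr,
                                   uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, addr, buf, sizeof(buf), 0) < 0) {
        return false;       /* page not mapped or not readable */
    }
    *out = ldl_p(buf);      /* decode in the target's natural byte order */
    return true;
}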
bellard8df1cd02005-01-28 22:37:22 +00002476
bellard13eb76e2004-01-24 15:23:36 +00002477#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002478
Paolo Bonzini845b6212015-03-23 11:45:53 +01002479static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002480 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002481{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002482 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002483 addr += memory_region_get_ram_addr(mr);
2484
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002485 /* No early return if dirty_log_mask is or becomes 0, because
2486 * cpu_physical_memory_set_dirty_range will still call
2487 * xen_modified_memory.
2488 */
2489 if (dirty_log_mask) {
2490 dirty_log_mask =
2491 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002492 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002493 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2494 tb_invalidate_phys_range(addr, addr + length);
2495 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2496 }
2497 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002498}
2499
Richard Henderson23326162013-07-08 14:55:59 -07002500static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002501{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002502 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002503
2504 /* Regions are assumed to support 1-4 byte accesses unless
2505 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002506 if (access_size_max == 0) {
2507 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002508 }
Richard Henderson23326162013-07-08 14:55:59 -07002509
2510 /* Bound the maximum access by the alignment of the address. */
2511 if (!mr->ops->impl.unaligned) {
2512 unsigned align_size_max = addr & -addr;
2513 if (align_size_max != 0 && align_size_max < access_size_max) {
2514 access_size_max = align_size_max;
2515 }
2516 }
2517
2518 /* Don't attempt accesses larger than the maximum. */
2519 if (l > access_size_max) {
2520 l = access_size_max;
2521 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002522 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002523
2524 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002525}
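/*
 * Worked example of the clamping above (illustrative, assuming a device
 * whose ops declare valid.max_access_size = 4 and impl.unaligned = false):
 * a 6-byte access at addr = 0x1002 proceeds as
 *   addr & -addr = 0x2          -> alignment permits at most 2 bytes here
 *   l = 6 is bounded to 2, and pow2floor(2) = 2
 * so the caller issues a 2-byte access at 0x1002 first; on the next loop
 * iteration addr = 0x1004 is 4-byte aligned and the remaining 4 bytes go
 * out as a single 4-byte access.
 */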
2526
Jan Kiszka4840f102015-06-18 18:47:22 +02002527static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002528{
Jan Kiszka4840f102015-06-18 18:47:22 +02002529 bool unlocked = !qemu_mutex_iothread_locked();
2530 bool release_lock = false;
2531
2532 if (unlocked && mr->global_locking) {
2533 qemu_mutex_lock_iothread();
2534 unlocked = false;
2535 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002536 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002537 if (mr->flush_coalesced_mmio) {
2538 if (unlocked) {
2539 qemu_mutex_lock_iothread();
2540 }
2541 qemu_flush_coalesced_mmio_buffer();
2542 if (unlocked) {
2543 qemu_mutex_unlock_iothread();
2544 }
2545 }
2546
2547 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002548}
2549
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002550/* Called within RCU critical section. */
2551static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2552 MemTxAttrs attrs,
2553 const uint8_t *buf,
2554 int len, hwaddr addr1,
2555 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002556{
bellard13eb76e2004-01-24 15:23:36 +00002557 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002558 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002559 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002560 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002561
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002562 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002563 if (!memory_access_is_direct(mr, true)) {
2564 release_lock |= prepare_mmio_access(mr);
2565 l = memory_access_size(mr, l, addr1);
2566 /* XXX: could force current_cpu to NULL to avoid
2567 potential bugs */
2568 switch (l) {
2569 case 8:
2570 /* 64 bit write access */
2571 val = ldq_p(buf);
2572 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2573 attrs);
2574 break;
2575 case 4:
2576 /* 32 bit write access */
2577 val = ldl_p(buf);
2578 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2579 attrs);
2580 break;
2581 case 2:
2582 /* 16 bit write access */
2583 val = lduw_p(buf);
2584 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2585 attrs);
2586 break;
2587 case 1:
2588 /* 8 bit write access */
2589 val = ldub_p(buf);
2590 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2591 attrs);
2592 break;
2593 default:
2594 abort();
bellard13eb76e2004-01-24 15:23:36 +00002595 }
2596 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002597 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002598 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002599 memcpy(ptr, buf, l);
2600 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002601 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002602
2603 if (release_lock) {
2604 qemu_mutex_unlock_iothread();
2605 release_lock = false;
2606 }
2607
bellard13eb76e2004-01-24 15:23:36 +00002608 len -= l;
2609 buf += l;
2610 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002611
2612 if (!len) {
2613 break;
2614 }
2615
2616 l = len;
2617 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002618 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002619
Peter Maydell3b643492015-04-26 16:49:23 +01002620 return result;
bellard13eb76e2004-01-24 15:23:36 +00002621}
bellard8df1cd02005-01-28 22:37:22 +00002622
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002623MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2624 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002625{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002626 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002627 hwaddr addr1;
2628 MemoryRegion *mr;
2629 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002630
2631 if (len > 0) {
2632 rcu_read_lock();
2633 l = len;
2634 mr = address_space_translate(as, addr, &addr1, &l, true);
2635 result = address_space_write_continue(as, addr, attrs, buf, len,
2636 addr1, l, mr);
2637 rcu_read_unlock();
2638 }
2639
2640 return result;
2641}
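/*
 * A minimal usage sketch (illustrative only, not part of the original
 * code): storing and fetching a 32-bit value through the API above, with
 * the MemTxResult checked rather than ignored. The helper names are
 * placeholders; address_space_read() is the header-declared counterpart
 * of the write path implemented here.
 */
static MemTxResult example_write_guest_u32(AddressSpace *as, hwaddr addr,
                                           uint32_t val)
{
    uint8_t buf[4];

    stl_p(buf, val);                       /* stage in target byte order */
    return address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                               buf, sizeof(buf));
}

static MemTxResult example_read_guest_u32(AddressSpace *as, hwaddr addr,
                                          uint32_t *val)
{
    uint8_t buf[4];
    MemTxResult r = address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                                       buf, sizeof(buf));

    if (r == MEMTX_OK) {
        *val = ldl_p(buf);                 /* decode in target byte order */
    }
    return r;
}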
2642
2643/* Called within RCU critical section. */
2644MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2645 MemTxAttrs attrs, uint8_t *buf,
2646 int len, hwaddr addr1, hwaddr l,
2647 MemoryRegion *mr)
2648{
2649 uint8_t *ptr;
2650 uint64_t val;
2651 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002652 bool release_lock = false;
2653
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002654 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002655 if (!memory_access_is_direct(mr, false)) {
2656 /* I/O case */
2657 release_lock |= prepare_mmio_access(mr);
2658 l = memory_access_size(mr, l, addr1);
2659 switch (l) {
2660 case 8:
2661 /* 64 bit read access */
2662 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2663 attrs);
2664 stq_p(buf, val);
2665 break;
2666 case 4:
2667 /* 32 bit read access */
2668 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2669 attrs);
2670 stl_p(buf, val);
2671 break;
2672 case 2:
2673 /* 16 bit read access */
2674 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2675 attrs);
2676 stw_p(buf, val);
2677 break;
2678 case 1:
2679 /* 8 bit read access */
2680 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2681 attrs);
2682 stb_p(buf, val);
2683 break;
2684 default:
2685 abort();
2686 }
2687 } else {
2688 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002689 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002690 memcpy(buf, ptr, l);
2691 }
2692
2693 if (release_lock) {
2694 qemu_mutex_unlock_iothread();
2695 release_lock = false;
2696 }
2697
2698 len -= l;
2699 buf += l;
2700 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002701
2702 if (!len) {
2703 break;
2704 }
2705
2706 l = len;
2707 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002708 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002709
2710 return result;
2711}
2712
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002713MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2714 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002715{
2716 hwaddr l;
2717 hwaddr addr1;
2718 MemoryRegion *mr;
2719 MemTxResult result = MEMTX_OK;
2720
2721 if (len > 0) {
2722 rcu_read_lock();
2723 l = len;
2724 mr = address_space_translate(as, addr, &addr1, &l, false);
2725 result = address_space_read_continue(as, addr, attrs, buf, len,
2726 addr1, l, mr);
2727 rcu_read_unlock();
2728 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002729
2730 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002731}
2732
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002733MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2734 uint8_t *buf, int len, bool is_write)
2735{
2736 if (is_write) {
2737 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2738 } else {
2739 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2740 }
2741}
Avi Kivityac1970f2012-10-03 16:22:53 +02002742
Avi Kivitya8170e52012-10-23 12:30:10 +02002743void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002744 int len, int is_write)
2745{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002746 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2747 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002748}
2749
Alexander Graf582b55a2013-12-11 14:17:44 +01002750enum write_rom_type {
2751 WRITE_DATA,
2752 FLUSH_CACHE,
2753};
2754
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002755static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002756 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002757{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002758 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002759 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002760 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002761 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002762
Paolo Bonzini41063e12015-03-18 14:21:43 +01002763 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002764 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002765 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002766 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002767
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002768 if (!(memory_region_is_ram(mr) ||
2769 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002770 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002771 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002772 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002773 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002774 switch (type) {
2775 case WRITE_DATA:
2776 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002777 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002778 break;
2779 case FLUSH_CACHE:
2780 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2781 break;
2782 }
bellardd0ecd2a2006-04-23 17:14:48 +00002783 }
2784 len -= l;
2785 buf += l;
2786 addr += l;
2787 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002788 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002789}
2790
Alexander Graf582b55a2013-12-11 14:17:44 +01002791/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002792void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002793 const uint8_t *buf, int len)
2794{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002795 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002796}
2797
2798void cpu_flush_icache_range(hwaddr start, int len)
2799{
2800 /*
2801 * This function should do the same thing as an icache flush that was
2802 * triggered from within the guest. For TCG we are always cache coherent,
2803 * so there is no need to flush anything. For KVM / Xen we need to flush
2804 * the host's instruction cache at least.
2805 */
2806 if (tcg_enabled()) {
2807 return;
2808 }
2809
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002810 cpu_physical_memory_write_rom_internal(&address_space_memory,
2811 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002812}
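/*
 * A hedged sketch (illustrative only) of the typical pairing of the two
 * helpers above when code is placed into guest memory at setup time,
 * e.g. by a firmware loader. "blob", "dest" and "size" are placeholders.
 */
static void example_install_rom_blob(hwaddr dest, const uint8_t *blob,
                                     int size)
{
    /* Copy even into ROM-marked regions and mark the range dirty. */
    cpu_physical_memory_write_rom(&address_space_memory, dest, blob, size);
    /* Under KVM/Xen the host instruction cache must also be flushed;
     * under TCG this is a no-op (see the comment above). */
    cpu_flush_icache_range(dest, size);
}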
2813
aliguori6d16c2f2009-01-22 16:59:11 +00002814typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002815 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002816 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002817 hwaddr addr;
2818 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002819 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002820} BounceBuffer;
2821
2822static BounceBuffer bounce;
2823
aliguoriba223c22009-01-22 16:59:16 +00002824typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002825 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002826 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002827} MapClient;
2828
Fam Zheng38e047b2015-03-16 17:03:35 +08002829QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002830static QLIST_HEAD(map_client_list, MapClient) map_client_list
2831 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002832
Fam Zhenge95205e2015-03-16 17:03:37 +08002833static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002834{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002835 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002836 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002837}
2838
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002839static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002840{
2841 MapClient *client;
2842
Blue Swirl72cf2d42009-09-12 07:36:22 +00002843 while (!QLIST_EMPTY(&map_client_list)) {
2844 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002845 qemu_bh_schedule(client->bh);
2846 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002847 }
2848}
2849
Fam Zhenge95205e2015-03-16 17:03:37 +08002850void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002851{
2852 MapClient *client = g_malloc(sizeof(*client));
2853
Fam Zheng38e047b2015-03-16 17:03:35 +08002854 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002855 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002856 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002857 if (!atomic_read(&bounce.in_use)) {
2858 cpu_notify_map_clients_locked();
2859 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002860 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002861}
2862
Fam Zheng38e047b2015-03-16 17:03:35 +08002863void cpu_exec_init_all(void)
2864{
2865 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002866 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002867 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002868 qemu_mutex_init(&map_client_list_lock);
2869}
2870
Fam Zhenge95205e2015-03-16 17:03:37 +08002871void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002872{
Fam Zhenge95205e2015-03-16 17:03:37 +08002873 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002874
Fam Zhenge95205e2015-03-16 17:03:37 +08002875 qemu_mutex_lock(&map_client_list_lock);
2876 QLIST_FOREACH(client, &map_client_list, link) {
2877 if (client->bh == bh) {
2878 cpu_unregister_map_client_do(client);
2879 break;
2880 }
2881 }
2882 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002883}
2884
2885static void cpu_notify_map_clients(void)
2886{
Fam Zheng38e047b2015-03-16 17:03:35 +08002887 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002888 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002889 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002890}
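/*
 * A hedged sketch (illustrative only) of how a device model uses the
 * map-client notification machinery above: when address_space_map()
 * fails because the single bounce buffer is busy, register a bottom half
 * and retry the mapping from it once a slot frees up. The struct and
 * function names are hypothetical; a cancellation path would call
 * cpu_unregister_map_client() instead.
 */
typedef struct ExampleDMAState {
    QEMUBH *bh;
    /* ... device-specific request state ... */
} ExampleDMAState;

static void example_retry_dma_bh(void *opaque)
{
    ExampleDMAState *s = opaque;

    /* The client was already dropped from the list before this BH ran;
     * release the BH and retry the mapping. */
    qemu_bh_delete(s->bh);
    s->bh = NULL;
    /* ... call address_space_map() again and continue the transfer ... */
}

static void example_start_dma(ExampleDMAState *s)
{
    s->bh = qemu_bh_new(example_retry_dma_bh, s);
    cpu_register_map_client(s->bh);   /* called back when a retry may succeed */
}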
2891
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002892bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2893{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002894 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002895 hwaddr l, xlat;
2896
Paolo Bonzini41063e12015-03-18 14:21:43 +01002897 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002898 while (len > 0) {
2899 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002900 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2901 if (!memory_access_is_direct(mr, is_write)) {
2902 l = memory_access_size(mr, l, addr);
2903 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002904 return false;
2905 }
2906 }
2907
2908 len -= l;
2909 addr += l;
2910 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002911 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002912 return true;
2913}
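/*
 * An illustrative sketch (assumption-laden, not from the original code):
 * a device model probing a guest DMA window with the helper above before
 * starting a transfer, so an unassigned or otherwise inaccessible range
 * can be rejected up front instead of failing halfway through.
 */
static bool example_dma_window_ok(AddressSpace *as, hwaddr base, int size)
{
    return address_space_access_valid(as, base, size, false) &&
           address_space_access_valid(as, base, size, true);
}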
2914
aliguori6d16c2f2009-01-22 16:59:11 +00002915/* Map a physical memory region into a host virtual address.
2916 * May map a subset of the requested range, given by and returned in *plen.
2917 * May return NULL if resources needed to perform the mapping are exhausted.
2918 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002919 * Use cpu_register_map_client() to know when retrying the map operation is
2920 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002921 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002922void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002923 hwaddr addr,
2924 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002925 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002926{
Avi Kivitya8170e52012-10-23 12:30:10 +02002927 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002928 hwaddr done = 0;
2929 hwaddr l, xlat, base;
2930 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002931 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002932
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002933 if (len == 0) {
2934 return NULL;
2935 }
aliguori6d16c2f2009-01-22 16:59:11 +00002936
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002937 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002938 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002939 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002940
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002941 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002942 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002943 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002944 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002945 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002946 /* Avoid unbounded allocations */
2947 l = MIN(l, TARGET_PAGE_SIZE);
2948 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002949 bounce.addr = addr;
2950 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002951
2952 memory_region_ref(mr);
2953 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002954 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002955 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2956 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002957 }
aliguori6d16c2f2009-01-22 16:59:11 +00002958
Paolo Bonzini41063e12015-03-18 14:21:43 +01002959 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002960 *plen = l;
2961 return bounce.buffer;
2962 }
2963
2964 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002965
2966 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002967 len -= l;
2968 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002969 done += l;
2970 if (len == 0) {
2971 break;
2972 }
2973
2974 l = len;
2975 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2976 if (this_mr != mr || xlat != base + done) {
2977 break;
2978 }
aliguori6d16c2f2009-01-22 16:59:11 +00002979 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002980
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002981 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002982 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002983 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002984 rcu_read_unlock();
2985
2986 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002987}
2988
Avi Kivityac1970f2012-10-03 16:22:53 +02002989/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002990 * Will also mark the memory as dirty if is_write == 1. access_len gives
2991 * the amount of memory that was actually read or written by the caller.
2992 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002993void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2994 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002995{
2996 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002997 MemoryRegion *mr;
2998 ram_addr_t addr1;
2999
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01003000 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003001 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003002 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003003 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003004 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003005 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003006 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003007 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003008 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003009 return;
3010 }
3011 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003012 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3013 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003014 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003015 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003016 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003017 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003018 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003019 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003020}
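/*
 * A hedged usage sketch (illustrative only) of the map/unmap pair above:
 * zero-copy access to guest memory, looping because only part of the
 * range (or the bounce buffer) may be mapped in one go. The data
 * produced (memset here) is a placeholder.
 */
static void example_map_and_fill(AddressSpace *as, hwaddr addr, hwaddr len)
{
    while (len) {
        hwaddr plen = len;
        void *host = address_space_map(as, addr, &plen, true /* is_write */);

        if (!host) {
            /* Resources exhausted (e.g. bounce buffer busy): a real caller
             * would use cpu_register_map_client() and retry later. */
            return;
        }
        memset(host, 0, plen);
        address_space_unmap(as, host, plen, true, plen);
        addr += plen;
        len -= plen;
    }
}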
bellardd0ecd2a2006-04-23 17:14:48 +00003021
Avi Kivitya8170e52012-10-23 12:30:10 +02003022void *cpu_physical_memory_map(hwaddr addr,
3023 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003024 int is_write)
3025{
3026 return address_space_map(&address_space_memory, addr, plen, is_write);
3027}
3028
Avi Kivitya8170e52012-10-23 12:30:10 +02003029void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3030 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003031{
3032 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3033}
3034
bellard8df1cd02005-01-28 22:37:22 +00003035/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003036static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3037 MemTxAttrs attrs,
3038 MemTxResult *result,
3039 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003040{
bellard8df1cd02005-01-28 22:37:22 +00003041 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003042 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003043 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003044 hwaddr l = 4;
3045 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003046 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003047 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003048
Paolo Bonzini41063e12015-03-18 14:21:43 +01003049 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003050 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003051 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003052 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003053
bellard8df1cd02005-01-28 22:37:22 +00003054 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003055 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003056#if defined(TARGET_WORDS_BIGENDIAN)
3057 if (endian == DEVICE_LITTLE_ENDIAN) {
3058 val = bswap32(val);
3059 }
3060#else
3061 if (endian == DEVICE_BIG_ENDIAN) {
3062 val = bswap32(val);
3063 }
3064#endif
bellard8df1cd02005-01-28 22:37:22 +00003065 } else {
3066 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003067 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003068 switch (endian) {
3069 case DEVICE_LITTLE_ENDIAN:
3070 val = ldl_le_p(ptr);
3071 break;
3072 case DEVICE_BIG_ENDIAN:
3073 val = ldl_be_p(ptr);
3074 break;
3075 default:
3076 val = ldl_p(ptr);
3077 break;
3078 }
Peter Maydell50013112015-04-26 16:49:24 +01003079 r = MEMTX_OK;
3080 }
3081 if (result) {
3082 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003083 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003084 if (release_lock) {
3085 qemu_mutex_unlock_iothread();
3086 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003087 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003088 return val;
3089}
3090
Peter Maydell50013112015-04-26 16:49:24 +01003091uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3092 MemTxAttrs attrs, MemTxResult *result)
3093{
3094 return address_space_ldl_internal(as, addr, attrs, result,
3095 DEVICE_NATIVE_ENDIAN);
3096}
3097
3098uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3099 MemTxAttrs attrs, MemTxResult *result)
3100{
3101 return address_space_ldl_internal(as, addr, attrs, result,
3102 DEVICE_LITTLE_ENDIAN);
3103}
3104
3105uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3106 MemTxAttrs attrs, MemTxResult *result)
3107{
3108 return address_space_ldl_internal(as, addr, attrs, result,
3109 DEVICE_BIG_ENDIAN);
3110}
3111
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003112uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003113{
Peter Maydell50013112015-04-26 16:49:24 +01003114 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003115}
3116
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003117uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003118{
Peter Maydell50013112015-04-26 16:49:24 +01003119 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003120}
3121
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003122uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003123{
Peter Maydell50013112015-04-26 16:49:24 +01003124 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003125}
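/*
 * An illustrative sketch of the fixed-endian loads above: reading a
 * little-endian device register block through an explicit-endian
 * accessor, so the result is correct regardless of
 * TARGET_WORDS_BIGENDIAN. The register layout and the all-ones error
 * value are placeholders, not a real device.
 */
static uint32_t example_read_le_reg(AddressSpace *as, hwaddr regs_base,
                                    hwaddr reg)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, regs_base + reg,
                                        MEMTXATTRS_UNSPECIFIED, &res);

    return (res == MEMTX_OK) ? val : 0xffffffff;   /* all-ones on bus error */
}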
3126
bellard84b7b8e2005-11-28 21:19:04 +00003127/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003128static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3129 MemTxAttrs attrs,
3130 MemTxResult *result,
3131 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003132{
bellard84b7b8e2005-11-28 21:19:04 +00003133 uint8_t *ptr;
3134 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003135 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003136 hwaddr l = 8;
3137 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003138 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003139 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003140
Paolo Bonzini41063e12015-03-18 14:21:43 +01003141 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003142 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003143 false);
3144 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003145 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003146
bellard84b7b8e2005-11-28 21:19:04 +00003147 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003148 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003149#if defined(TARGET_WORDS_BIGENDIAN)
3150 if (endian == DEVICE_LITTLE_ENDIAN) {
3151 val = bswap64(val);
3152 }
3153#else
3154 if (endian == DEVICE_BIG_ENDIAN) {
3155 val = bswap64(val);
3156 }
3157#endif
bellard84b7b8e2005-11-28 21:19:04 +00003158 } else {
3159 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003160 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003161 switch (endian) {
3162 case DEVICE_LITTLE_ENDIAN:
3163 val = ldq_le_p(ptr);
3164 break;
3165 case DEVICE_BIG_ENDIAN:
3166 val = ldq_be_p(ptr);
3167 break;
3168 default:
3169 val = ldq_p(ptr);
3170 break;
3171 }
Peter Maydell50013112015-04-26 16:49:24 +01003172 r = MEMTX_OK;
3173 }
3174 if (result) {
3175 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003176 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003177 if (release_lock) {
3178 qemu_mutex_unlock_iothread();
3179 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003180 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003181 return val;
3182}
3183
Peter Maydell50013112015-04-26 16:49:24 +01003184uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3185 MemTxAttrs attrs, MemTxResult *result)
3186{
3187 return address_space_ldq_internal(as, addr, attrs, result,
3188 DEVICE_NATIVE_ENDIAN);
3189}
3190
3191uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3192 MemTxAttrs attrs, MemTxResult *result)
3193{
3194 return address_space_ldq_internal(as, addr, attrs, result,
3195 DEVICE_LITTLE_ENDIAN);
3196}
3197
3198uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3199 MemTxAttrs attrs, MemTxResult *result)
3200{
3201 return address_space_ldq_internal(as, addr, attrs, result,
3202 DEVICE_BIG_ENDIAN);
3203}
3204
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003205uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003206{
Peter Maydell50013112015-04-26 16:49:24 +01003207 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003208}
3209
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003210uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003211{
Peter Maydell50013112015-04-26 16:49:24 +01003212 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003213}
3214
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003215uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003216{
Peter Maydell50013112015-04-26 16:49:24 +01003217 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003218}
3219
bellardaab33092005-10-30 20:48:42 +00003220/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003221uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3222 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003223{
3224 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003225 MemTxResult r;
3226
3227 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3228 if (result) {
3229 *result = r;
3230 }
bellardaab33092005-10-30 20:48:42 +00003231 return val;
3232}
3233
Peter Maydell50013112015-04-26 16:49:24 +01003234uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3235{
3236 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3237}
3238
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003239/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003240static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3241 hwaddr addr,
3242 MemTxAttrs attrs,
3243 MemTxResult *result,
3244 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003245{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003246 uint8_t *ptr;
3247 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003248 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003249 hwaddr l = 2;
3250 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003251 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003252 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003253
Paolo Bonzini41063e12015-03-18 14:21:43 +01003254 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003255 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003256 false);
3257 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003258 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003259
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003260 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003261 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003262#if defined(TARGET_WORDS_BIGENDIAN)
3263 if (endian == DEVICE_LITTLE_ENDIAN) {
3264 val = bswap16(val);
3265 }
3266#else
3267 if (endian == DEVICE_BIG_ENDIAN) {
3268 val = bswap16(val);
3269 }
3270#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003271 } else {
3272 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003273 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003274 switch (endian) {
3275 case DEVICE_LITTLE_ENDIAN:
3276 val = lduw_le_p(ptr);
3277 break;
3278 case DEVICE_BIG_ENDIAN:
3279 val = lduw_be_p(ptr);
3280 break;
3281 default:
3282 val = lduw_p(ptr);
3283 break;
3284 }
Peter Maydell50013112015-04-26 16:49:24 +01003285 r = MEMTX_OK;
3286 }
3287 if (result) {
3288 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003289 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003290 if (release_lock) {
3291 qemu_mutex_unlock_iothread();
3292 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003293 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003294 return val;
bellardaab33092005-10-30 20:48:42 +00003295}
3296
Peter Maydell50013112015-04-26 16:49:24 +01003297uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3298 MemTxAttrs attrs, MemTxResult *result)
3299{
3300 return address_space_lduw_internal(as, addr, attrs, result,
3301 DEVICE_NATIVE_ENDIAN);
3302}
3303
3304uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3305 MemTxAttrs attrs, MemTxResult *result)
3306{
3307 return address_space_lduw_internal(as, addr, attrs, result,
3308 DEVICE_LITTLE_ENDIAN);
3309}
3310
3311uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3312 MemTxAttrs attrs, MemTxResult *result)
3313{
3314 return address_space_lduw_internal(as, addr, attrs, result,
3315 DEVICE_BIG_ENDIAN);
3316}
3317
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003318uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003319{
Peter Maydell50013112015-04-26 16:49:24 +01003320 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003321}
3322
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003323uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003324{
Peter Maydell50013112015-04-26 16:49:24 +01003325 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003326}
3327
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003328uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003329{
Peter Maydell50013112015-04-26 16:49:24 +01003330 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003331}
3332
bellard8df1cd02005-01-28 22:37:22 +00003333/* warning: addr must be aligned. The RAM page is not marked as dirty
3334 and the code inside is not invalidated. This is useful when the dirty
3335 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003336void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3337 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003338{
bellard8df1cd02005-01-28 22:37:22 +00003339 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003340 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003341 hwaddr l = 4;
3342 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003343 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003344 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003345 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003346
Paolo Bonzini41063e12015-03-18 14:21:43 +01003347 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003348 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003349 true);
3350 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003351 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003352
Peter Maydell50013112015-04-26 16:49:24 +01003353 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003354 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003355 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003356 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003357
Paolo Bonzini845b6212015-03-23 11:45:53 +01003358 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3359 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003360 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3361 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003362 r = MEMTX_OK;
3363 }
3364 if (result) {
3365 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003366 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003367 if (release_lock) {
3368 qemu_mutex_unlock_iothread();
3369 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003370 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003371}
3372
Peter Maydell50013112015-04-26 16:49:24 +01003373void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3374{
3375 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3376}
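/*
 * A hedged sketch (illustrative only) of the use case spelled out in the
 * comment above stl_phys_notdirty(): target code updating an accessed bit
 * in a guest page table entry in place, without marking the page that
 * holds the PTE as dirty. The bit value is a placeholder, not any
 * particular architecture's PTE layout.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);        /* fetch the entry */

    if (!(pte & 0x20 /* EXAMPLE_PTE_ACCESSED */)) {
        pte |= 0x20;
        /* Write back in place without dirtying the containing page. */
        stl_phys_notdirty(as, pte_addr, pte);
    }
}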
3377
bellard8df1cd02005-01-28 22:37:22 +00003378/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003379static inline void address_space_stl_internal(AddressSpace *as,
3380 hwaddr addr, uint32_t val,
3381 MemTxAttrs attrs,
3382 MemTxResult *result,
3383 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003384{
bellard8df1cd02005-01-28 22:37:22 +00003385 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003386 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003387 hwaddr l = 4;
3388 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003389 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003390 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003391
Paolo Bonzini41063e12015-03-18 14:21:43 +01003392 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003393 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003394 true);
3395 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003396 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003397
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003398#if defined(TARGET_WORDS_BIGENDIAN)
3399 if (endian == DEVICE_LITTLE_ENDIAN) {
3400 val = bswap32(val);
3401 }
3402#else
3403 if (endian == DEVICE_BIG_ENDIAN) {
3404 val = bswap32(val);
3405 }
3406#endif
Peter Maydell50013112015-04-26 16:49:24 +01003407 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003408 } else {
bellard8df1cd02005-01-28 22:37:22 +00003409 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003410 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003411 switch (endian) {
3412 case DEVICE_LITTLE_ENDIAN:
3413 stl_le_p(ptr, val);
3414 break;
3415 case DEVICE_BIG_ENDIAN:
3416 stl_be_p(ptr, val);
3417 break;
3418 default:
3419 stl_p(ptr, val);
3420 break;
3421 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003422 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003423 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003424 }
Peter Maydell50013112015-04-26 16:49:24 +01003425 if (result) {
3426 *result = r;
3427 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003428 if (release_lock) {
3429 qemu_mutex_unlock_iothread();
3430 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003431 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003432}
3433
3434void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3435 MemTxAttrs attrs, MemTxResult *result)
3436{
3437 address_space_stl_internal(as, addr, val, attrs, result,
3438 DEVICE_NATIVE_ENDIAN);
3439}
3440
3441void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3442 MemTxAttrs attrs, MemTxResult *result)
3443{
3444 address_space_stl_internal(as, addr, val, attrs, result,
3445 DEVICE_LITTLE_ENDIAN);
3446}
3447
3448void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3449 MemTxAttrs attrs, MemTxResult *result)
3450{
3451 address_space_stl_internal(as, addr, val, attrs, result,
3452 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003453}
3454
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003455void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003456{
Peter Maydell50013112015-04-26 16:49:24 +01003457 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003458}
3459
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003460void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003461{
Peter Maydell50013112015-04-26 16:49:24 +01003462 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003463}
3464
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003465void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003466{
Peter Maydell50013112015-04-26 16:49:24 +01003467 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003468}
3469
bellardaab33092005-10-30 20:48:42 +00003470/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003471void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3472 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003473{
3474 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003475 MemTxResult r;
3476
3477 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3478 if (result) {
3479 *result = r;
3480 }
3481}
3482
3483void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3484{
3485 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003486}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
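
/*
 * Illustrative sketch (not part of QEMU): a debug-stub style read of guest
 * virtual memory via cpu_memory_rw_debug() above.  The function name and
 * the fixed 4-byte read are assumptions made for this example; ldl_p() is
 * the usual target-byte-order load helper.
 */
static int example_debug_read_u32(CPUState *cpu, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    /* is_write == 0: read from the guest's virtual address space */
    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;              /* page not mapped */
    }
    *out = ldl_p(buf);          /* interpret in target byte order */
    return 0;
}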

/*
 * Allows code that needs to deal with migration bitmaps etc to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
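
/*
 * Illustrative sketch (not part of QEMU): target-independent code such as
 * migration bitmap handling can derive the page size from
 * qemu_target_page_bits() without including target-specific headers.
 * The function name is an assumption for this example.
 */
static size_t example_pages_for_length(uint64_t length)
{
    size_t page_size = (size_t)1 << qemu_target_page_bits();

    /* round up to whole target pages, as a dirty bitmap would need */
    return (length + page_size - 1) / page_size;
}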

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
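
/*
 * Illustrative sketch (not part of QEMU): how a legacy-virtio style caller
 * might use target_words_bigendian() to pick the device byte order, since
 * legacy virtio follows the guest's native endianness.  The function name
 * is an assumption for this example.
 */
static enum device_endian example_virtio_legacy_endian(void)
{
    return target_words_bigendian() ? DEVICE_BIG_ENDIAN
                                    : DEVICE_LITTLE_ENDIAN;
}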

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
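
/*
 * Illustrative sketch (not part of QEMU): a caller that wants to treat
 * MMIO-backed guest physical addresses differently from RAM/ROM, e.g. when
 * deciding whether a region can be copied out directly.  The function name
 * is an assumption for this example.
 */
static bool example_addr_is_dumpable(hwaddr paddr)
{
    /* true for RAM or ROM-device backed memory, false for MMIO */
    return !cpu_physical_memory_is_io(paddr);
}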

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
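
/*
 * Illustrative sketch (not part of QEMU): summing the used length of all
 * RAM blocks with qemu_ram_foreach_block().  The callback signature is
 * assumed to match RAMBlockIterFunc as it is invoked above (block name,
 * host pointer, offset, length, opaque); the function names are assumptions
 * for this example.
 */
static int example_sum_block_cb(const char *block_name, void *host_addr,
                                ram_addr_t offset, ram_addr_t length,
                                void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;               /* keep iterating */
}

static uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block_cb, &total);
    return total;
}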
#endif