/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

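/* Grow map->nodes so that at least @nodes more entries fit.  The static
 * alloc_hint remembers the last allocation size so that later maps start
 * out with a reasonably sized node array.
 */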
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
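/* Look up the section covering @addr in dispatch @d, optionally resolving
 * subpages, and return the offset of @addr within its MemoryRegion in
 * *xlat.  For RAM regions *plen is clamped so the access stays within the
 * section; MMIO lengths are left for the caller to clamp (see below).
 */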
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

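/* Common per-CPU initialisation: reset the address-space bookkeeping, add
 * the CPU to the global list, and, on softmmu builds, expose a "memory"
 * link property (defaulting to the system address space) and register the
 * CPU's vmstate for migration.
 */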
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
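/* Return the RAMBlock containing the given ram_addr_t offset, checking the
 * most-recently-used block first and falling back to a walk of
 * ram_list.blocks.  Aborts if no block contains the offset.
 */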
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                         xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                         mru_block = NULL;
     *                         call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
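/* Test and atomically clear the dirty bits for [start, start + length) in
 * the given client's dirty bitmap, returning whether any page was dirty.
 * When dirty pages were found and TCG is in use, the TLBs covering the
 * range are reset so writes are trapped again.
 */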
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
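/* Compute the iotlb value stored in a softmmu TLB entry for this mapping:
 * a ram address tagged with PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM for
 * RAM sections, or a section index for everything else, with watchpointed
 * pages forced through PHYS_SECTION_WATCH and marked TLB_MMIO.
 */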
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001132static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001133{
1134 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001135 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001136 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001137 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001138 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001139 MemoryRegionSection subsection = {
1140 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001141 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001142 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001143 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001144
Avi Kivityf3705d52012-03-08 16:16:34 +02001145 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001146
Avi Kivityf3705d52012-03-08 16:16:34 +02001147 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001148 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001149 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001150 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001151 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001152 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001153 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001154 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001155 }
1156 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001157 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001158 subpage_register(subpage, start, end,
1159 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001160}
1161
1162
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001163static void register_multipage(AddressSpaceDispatch *d,
1164 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001165{
Avi Kivitya8170e52012-10-23 12:30:10 +02001166 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001167 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001168 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1169 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001170
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001171 assert(num_pages);
1172 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001173}
1174
Avi Kivityac1970f2012-10-03 16:22:53 +02001175static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001176{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001177 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001178 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001179 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001180 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001181
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001182 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1183 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1184 - now.offset_within_address_space;
1185
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001186 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001187 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001188 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001189 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001190 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001191 while (int128_ne(remain.size, now.size)) {
1192 remain.size = int128_sub(remain.size, now.size);
1193 remain.offset_within_address_space += int128_get64(now.size);
1194 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001195 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001196 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001197 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001198 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001199 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001200 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001201 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001202 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001203 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001204 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001205 }
1206}
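/*
 * Worked example (added for illustration), assuming 0x1000-byte target
 * pages: a section at guest address 0x1800 with size 0x3000 is split as
 *
 *     [0x1800, 0x2000)  unaligned head  -> register_subpage()
 *     [0x2000, 0x4000)  aligned middle  -> register_multipage()
 *     [0x4000, 0x4800)  unaligned tail  -> register_subpage()
 *
 * A section that is fully page-aligned takes only the multipage path.
 */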
1207
Sheng Yang62a27442010-01-26 19:21:16 +08001208void qemu_flush_coalesced_mmio_buffer(void)
1209{
1210 if (kvm_enabled())
1211 kvm_flush_coalesced_mmio_buffer();
1212}
1213
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001214void qemu_mutex_lock_ramlist(void)
1215{
1216 qemu_mutex_lock(&ram_list.mutex);
1217}
1218
1219void qemu_mutex_unlock_ramlist(void)
1220{
1221 qemu_mutex_unlock(&ram_list.mutex);
1222}
1223
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001224#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001225static void *file_ram_alloc(RAMBlock *block,
1226 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001227 const char *path,
1228 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001229{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001230 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001232 char *sanitized_name;
1233 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001234 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001235 int fd = -1;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001237 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1238 error_setg(errp,
1239 "host lacks kvm mmu notifiers, -mem-path unsupported");
1240 return NULL;
1241 }
1242
1243 for (;;) {
1244 fd = open(path, O_RDWR);
1245 if (fd >= 0) {
1246 /* @path names an existing file, use it */
1247 break;
1248 }
1249 if (errno == ENOENT) {
1250 /* @path names a file that doesn't exist, create it */
1251 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1252 if (fd >= 0) {
1253 unlink_on_error = true;
1254 break;
1255 }
1256 } else if (errno == EISDIR) {
1257 /* @path names a directory, create a file there */
1258 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1259 sanitized_name = g_strdup(memory_region_name(block->mr));
1260 for (c = sanitized_name; *c != '\0'; c++) {
1261 if (*c == '/') {
1262 *c = '_';
1263 }
1264 }
1265
1266 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1267 sanitized_name);
1268 g_free(sanitized_name);
1269
1270 fd = mkstemp(filename);
1271 if (fd >= 0) {
1272 unlink(filename);
1273 g_free(filename);
1274 break;
1275 }
1276 g_free(filename);
1277 }
1278 if (errno != EEXIST && errno != EINTR) {
1279 error_setg_errno(errp, errno,
1280 "can't open backing store %s for guest RAM",
1281 path);
1282 goto error;
1283 }
1284 /*
1285 * Try again on EINTR and EEXIST. The latter happens when
1286 * something else creates the file between our two open().
1287 */
1288 }
1289
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001290 block->page_size = qemu_fd_getpagesize(fd);
Haozhong Zhang83606682016-10-24 20:49:37 +08001291 block->mr->align = block->page_size;
1292#if defined(__s390x__)
1293 if (kvm_enabled()) {
1294 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1295 }
1296#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03001297
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001298 if (memory < block->page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001299 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001300 "or larger than page size 0x%zx",
1301 memory, block->page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001302 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001303 }
1304
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001305 memory = ROUND_UP(memory, block->page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001306
1307 /*
1308 * ftruncate is not supported by hugetlbfs in older
1309 * hosts, so don't bother bailing out on errors.
1310 * If anything goes wrong with it under other filesystems,
1311 * mmap will fail.
1312 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001313 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001314 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001315 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001316
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001317 area = qemu_ram_mmap(fd, memory, block->mr->align,
1318 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001319 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001320 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001321 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001322 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001323 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001324
1325 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001326 os_mem_prealloc(fd, area, memory, errp);
1327 if (errp && *errp) {
1328 goto error;
1329 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001330 }
1331
Alex Williamson04b16652010-07-02 11:13:17 -06001332 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001333 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001334
1335error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001336 if (area != MAP_FAILED) {
1337 qemu_ram_munmap(area, memory);
1338 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001339 if (unlink_on_error) {
1340 unlink(path);
1341 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001342 if (fd != -1) {
1343 close(fd);
1344 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001345 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001346}
1347#endif
1348
Mike Day0dc3f442013-09-05 14:41:35 -04001349/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001350static ram_addr_t find_ram_offset(ram_addr_t size)
1351{
Alex Williamson04b16652010-07-02 11:13:17 -06001352 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001353 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001354
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001355 assert(size != 0); /* it would hand out same offset multiple times */
1356
Mike Day0dc3f442013-09-05 14:41:35 -04001357 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001358 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001359 }
Alex Williamson04b16652010-07-02 11:13:17 -06001360
Mike Day0dc3f442013-09-05 14:41:35 -04001361 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001362 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001363
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001364 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001365
Mike Day0dc3f442013-09-05 14:41:35 -04001366 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001367 if (next_block->offset >= end) {
1368 next = MIN(next, next_block->offset);
1369 }
1370 }
1371 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001372 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001373 mingap = next - end;
1374 }
1375 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001376
1377 if (offset == RAM_ADDR_MAX) {
1378 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1379 (uint64_t)size);
1380 abort();
1381 }
1382
Alex Williamson04b16652010-07-02 11:13:17 -06001383 return offset;
1384}
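/*
 * Worked example (added for illustration): with existing blocks at
 * [0x0, 0x40000000) and [0x80000000, 0xc0000000), a request for
 * 0x10000000 bytes considers the gap after each block, keeps the
 * smallest gap that is still large enough (here the 1 GiB hole at
 * 0x40000000 rather than the unbounded space after 0xc0000000), and
 * returns 0x40000000.
 */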
1385
Juan Quintela652d7ec2012-07-20 10:37:54 +02001386ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001387{
Alex Williamsond17b5282010-06-25 11:08:38 -06001388 RAMBlock *block;
1389 ram_addr_t last = 0;
1390
Mike Day0dc3f442013-09-05 14:41:35 -04001391 rcu_read_lock();
1392 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001393 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001394 }
Mike Day0dc3f442013-09-05 14:41:35 -04001395 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001396 return last;
1397}
1398
Jason Baronddb97f12012-08-02 15:44:16 -04001399static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1400{
1401 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001402
1403 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001404 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001405 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1406 if (ret) {
1407 perror("qemu_madvise");
1408 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1409 "but dump_guest_core=off specified\n");
1410 }
1411 }
1412}
1413
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001414const char *qemu_ram_get_idstr(RAMBlock *rb)
1415{
1416 return rb->idstr;
1417}
1418
Mike Dayae3a7042013-09-05 14:41:35 -04001419/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001420void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001421{
Gongleifa53a0e2016-05-10 10:04:59 +08001422 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001423
Avi Kivityc5705a72011-12-20 15:59:12 +02001424 assert(new_block);
1425 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001426
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001427 if (dev) {
1428 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001429 if (id) {
1430 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001431 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001432 }
1433 }
1434 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1435
Gongleiab0a9952016-05-10 10:05:00 +08001436 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001437 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001438 if (block != new_block &&
1439 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001440 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1441 new_block->idstr);
1442 abort();
1443 }
1444 }
Mike Day0dc3f442013-09-05 14:41:35 -04001445 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001446}
1447
Mike Dayae3a7042013-09-05 14:41:35 -04001448/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001449void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001450{
Mike Dayae3a7042013-09-05 14:41:35 -04001451 /* FIXME: arch_init.c assumes that this is not called throughout
1452 * migration. Ignore the problem since hot-unplug during migration
1453 * does not work anyway.
1454 */
Hu Tao20cfe882014-04-02 15:13:26 +08001455 if (block) {
1456 memset(block->idstr, 0, sizeof(block->idstr));
1457 }
1458}
1459
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001460size_t qemu_ram_pagesize(RAMBlock *rb)
1461{
1462 return rb->page_size;
1463}
1464
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001465static int memory_try_enable_merging(void *addr, size_t len)
1466{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001467 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001468 /* disabled by the user */
1469 return 0;
1470 }
1471
1472 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1473}
1474
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001475/* Only legal before guest might have detected the memory size: e.g. on
1476 * incoming migration, or right after reset.
1477 *
1478 * As the memory core doesn't know how memory is accessed, it is up to
1479 * the resize callback to update device state and/or add assertions to detect
1480 * misuse, if necessary.
1481 */
Gongleifa53a0e2016-05-10 10:04:59 +08001482int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001483{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001484 assert(block);
1485
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001486 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001487
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001488 if (block->used_length == newsize) {
1489 return 0;
1490 }
1491
1492 if (!(block->flags & RAM_RESIZEABLE)) {
1493 error_setg_errno(errp, EINVAL,
1494 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1495 " in != 0x" RAM_ADDR_FMT, block->idstr,
1496 newsize, block->used_length);
1497 return -EINVAL;
1498 }
1499
1500 if (block->max_length < newsize) {
1501 error_setg_errno(errp, EINVAL,
1502 "Length too large: %s: 0x" RAM_ADDR_FMT
1503 " > 0x" RAM_ADDR_FMT, block->idstr,
1504 newsize, block->max_length);
1505 return -EINVAL;
1506 }
1507
1508 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1509 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001510 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1511 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001512 memory_region_set_size(block->mr, newsize);
1513 if (block->resized) {
1514 block->resized(block->idstr, newsize, block->host);
1515 }
1516 return 0;
1517}
1518
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001519/* Called with ram_list.mutex held */
1520static void dirty_memory_extend(ram_addr_t old_ram_size,
1521 ram_addr_t new_ram_size)
1522{
1523 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1524 DIRTY_MEMORY_BLOCK_SIZE);
1525 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1526 DIRTY_MEMORY_BLOCK_SIZE);
1527 int i;
1528
1529 /* Only need to extend if block count increased */
1530 if (new_num_blocks <= old_num_blocks) {
1531 return;
1532 }
1533
1534 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1535 DirtyMemoryBlocks *old_blocks;
1536 DirtyMemoryBlocks *new_blocks;
1537 int j;
1538
1539 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1540 new_blocks = g_malloc(sizeof(*new_blocks) +
1541 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1542
1543 if (old_num_blocks) {
1544 memcpy(new_blocks->blocks, old_blocks->blocks,
1545 old_num_blocks * sizeof(old_blocks->blocks[0]));
1546 }
1547
1548 for (j = old_num_blocks; j < new_num_blocks; j++) {
1549 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1550 }
1551
1552 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1553
1554 if (old_blocks) {
1555 g_free_rcu(old_blocks, rcu);
1556 }
1557 }
1558}
1559
Fam Zheng528f46a2016-03-01 14:18:18 +08001560static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001561{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001562 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001563 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001564 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001565 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001566
1567 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001568
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001569 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001570 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001571
1572 if (!new_block->host) {
1573 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001574 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001575 new_block->mr, &err);
1576 if (err) {
1577 error_propagate(errp, err);
1578 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001579 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001580 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001582 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001583 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001584 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001585 error_setg_errno(errp, errno,
1586 "cannot set up guest memory '%s'",
1587 memory_region_name(new_block->mr));
1588 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001589 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001590 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001591 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001592 }
1593 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001594
Li Zhijiandd631692015-07-02 20:18:06 +08001595 new_ram_size = MAX(old_ram_size,
1596 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1597 if (new_ram_size > old_ram_size) {
1598 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001599 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001600 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001601 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1602 * QLIST (which has an RCU-friendly variant) does not have insertion at
1603 * tail, so save the last element in last_block.
1604 */
Mike Day0dc3f442013-09-05 14:41:35 -04001605 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001606 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001607 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001608 break;
1609 }
1610 }
1611 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001612 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001613 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001614 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001615 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001616 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001617 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001618 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001619
Mike Day0dc3f442013-09-05 14:41:35 -04001620 /* Write list before version */
1621 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001622 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001623 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001624
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001625 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001626 new_block->used_length,
1627 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001628
Paolo Bonzinia904c912015-01-21 16:18:35 +01001629 if (new_block->host) {
1630 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1631 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
Cao jinc2cd6272016-09-12 14:34:56 +08001632 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
Paolo Bonzinia904c912015-01-21 16:18:35 +01001633 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001634 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001635}
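/*
 * Worked example (added for illustration) of the ordering kept above:
 * with blocks of 4 GiB, 1 GiB and 128 MiB already present, a new
 * 512 MiB block is inserted before the 128 MiB one, giving
 * 4 GiB -> 1 GiB -> 512 MiB -> 128 MiB.  If no smaller block exists,
 * the new block is appended after last_block instead.
 */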
1636
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001637#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001638RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1639 bool share, const char *mem_path,
1640 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001641{
1642 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001643 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001644
1645 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001646 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001647 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001648 }
1649
1650 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1651 /*
1652 * file_ram_alloc() needs to allocate just like
1653 * phys_mem_alloc, but we haven't bothered to provide
1654 * a hook there.
1655 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001656 error_setg(errp,
1657 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001658 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001659 }
1660
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001661 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001662 new_block = g_malloc0(sizeof(*new_block));
1663 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001664 new_block->used_length = size;
1665 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001666 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001667 new_block->host = file_ram_alloc(new_block, size,
1668 mem_path, errp);
1669 if (!new_block->host) {
1670 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001671 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001672 }
1673
Fam Zheng528f46a2016-03-01 14:18:18 +08001674 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001675 if (local_err) {
1676 g_free(new_block);
1677 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001678 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001679 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001680 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001681}
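/*
 * Illustrative sketch (not part of the original file): allocating guest
 * RAM backed by a file on a hugetlbfs mount.  The mount point, the
 * example_* name and the use of error_report_err() are assumptions made
 * for the example; "mr" must already be initialized by the caller.
 * Kept under #if 0 so it is not built.
 */
#if 0
static RAMBlock *example_alloc_hugepage_ram(MemoryRegion *mr, ram_addr_t size)
{
    Error *local_err = NULL;
    RAMBlock *rb;

    rb = qemu_ram_alloc_from_file(size, mr, true /* share */,
                                  "/dev/hugepages", &local_err);
    if (!rb) {
        error_report_err(local_err);
        return NULL;
    }
    return rb;
}
#endif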
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001682#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001683
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001684static
Fam Zheng528f46a2016-03-01 14:18:18 +08001685RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1686 void (*resized)(const char*,
1687 uint64_t length,
1688 void *host),
1689 void *host, bool resizeable,
1690 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001691{
1692 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001693 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001694
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001695 size = HOST_PAGE_ALIGN(size);
1696 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001697 new_block = g_malloc0(sizeof(*new_block));
1698 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001699 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001700 new_block->used_length = size;
1701 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001702 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001703 new_block->fd = -1;
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001704 new_block->page_size = getpagesize();
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001705 new_block->host = host;
1706 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001707 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001708 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001709 if (resizeable) {
1710 new_block->flags |= RAM_RESIZEABLE;
1711 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001712 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001713 if (local_err) {
1714 g_free(new_block);
1715 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001716 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001717 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001718 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001719}
1720
Fam Zheng528f46a2016-03-01 14:18:18 +08001721RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001722 MemoryRegion *mr, Error **errp)
1723{
1724 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1725}
1726
Fam Zheng528f46a2016-03-01 14:18:18 +08001727RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001728{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001729 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1730}
1731
Fam Zheng528f46a2016-03-01 14:18:18 +08001732RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001733 void (*resized)(const char*,
1734 uint64_t length,
1735 void *host),
1736 MemoryRegion *mr, Error **errp)
1737{
1738 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001739}
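/*
 * Illustrative sketch (not part of the original file): creating a
 * resizeable block and growing it later with qemu_ram_resize(), as a
 * device might do on incoming migration.  The example_* names and the
 * sizes are invented; "mr" is assumed to be set up by the caller.
 * Kept under #if 0 so it is not built.
 */
#if 0
static void example_resized(const char *id, uint64_t new_size, void *host)
{
    /* the owning device would refresh its view of the region here */
}

static void example_use_resizeable_ram(MemoryRegion *mr)
{
    Error *err = NULL;
    RAMBlock *rb = qemu_ram_alloc_resizeable(16 * 1024 * 1024,  /* initial */
                                             64 * 1024 * 1024,  /* maximum */
                                             example_resized, mr, &err);

    if (!rb) {
        error_report_err(err);
        return;
    }
    if (qemu_ram_resize(rb, 32 * 1024 * 1024, &err) < 0) {
        error_report_err(err);
    }
}
#endif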
bellarde9a1ab12007-02-08 23:08:38 +00001740
Paolo Bonzini43771532013-09-09 17:58:40 +02001741static void reclaim_ramblock(RAMBlock *block)
1742{
1743 if (block->flags & RAM_PREALLOC) {
1744 ;
1745 } else if (xen_enabled()) {
1746 xen_invalidate_map_cache_entry(block->host);
1747#ifndef _WIN32
1748 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001749 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001750 close(block->fd);
1751#endif
1752 } else {
1753 qemu_anon_ram_free(block->host, block->max_length);
1754 }
1755 g_free(block);
1756}
1757
Fam Zhengf1060c52016-03-01 14:18:22 +08001758void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001759{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001760 if (!block) {
1761 return;
1762 }
1763
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001764 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001765 QLIST_REMOVE_RCU(block, next);
1766 ram_list.mru_block = NULL;
1767 /* Write list before version */
1768 smp_wmb();
1769 ram_list.version++;
1770 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001771 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001772}
1773
Huang Yingcd19cfa2011-03-02 08:56:19 +01001774#ifndef _WIN32
1775void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1776{
1777 RAMBlock *block;
1778 ram_addr_t offset;
1779 int flags;
1780 void *area, *vaddr;
1781
Mike Day0dc3f442013-09-05 14:41:35 -04001782 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001783 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001784 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001785 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001786 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001787 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001788 } else if (xen_enabled()) {
1789 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001790 } else {
1791 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001792 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001793 flags |= (block->flags & RAM_SHARED ?
1794 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001795 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1796 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001797 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001798 /*
1799 * Remap needs to match alloc. Accelerators that
1800 * set phys_mem_alloc never remap. If they did,
1801 * we'd need a remap hook here.
1802 */
1803 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1804
Huang Yingcd19cfa2011-03-02 08:56:19 +01001805 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1806 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1807 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001808 }
1809 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001810 fprintf(stderr, "Could not remap addr: "
1811 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001812 length, addr);
1813 exit(1);
1814 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001815 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001816 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001817 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001818 }
1819 }
1820}
1821#endif /* !_WIN32 */
1822
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001823/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001824 * This should not be used for general purpose DMA. Use address_space_map
1825 * or address_space_rw instead. For local memory (e.g. video ram) that the
1826 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001827 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001828 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001829 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001830void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001831{
Gonglei3655cb92016-02-20 10:35:20 +08001832 RAMBlock *block = ram_block;
1833
1834 if (block == NULL) {
1835 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001836 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001837 }
Mike Dayae3a7042013-09-05 14:41:35 -04001838
1839 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001840 /* We need to check if the requested address is in the RAM
1841 * because we don't want to map the entire memory in QEMU.
1842 * In that case just map until the end of the page.
1843 */
1844 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001845 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001846 }
Mike Dayae3a7042013-09-05 14:41:35 -04001847
1848 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001849 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001850 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001851}
1852
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001853/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001854 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001855 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001856 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001857 */
Gonglei3655cb92016-02-20 10:35:20 +08001858static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1859 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001860{
Gonglei3655cb92016-02-20 10:35:20 +08001861 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001862 if (*size == 0) {
1863 return NULL;
1864 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001865
Gonglei3655cb92016-02-20 10:35:20 +08001866 if (block == NULL) {
1867 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001868 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001869 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001870 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001871
1872 if (xen_enabled() && block->host == NULL) {
1873 /* We need to check if the requested address is in the RAM
1874 * because we don't want to map the entire memory in QEMU.
1875 * In that case just map the requested area.
1876 */
1877 if (block->offset == 0) {
1878 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001879 }
1880
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001881 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001882 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001883
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001884 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001885}
1886
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001887/*
1888 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1889 * in that RAMBlock.
1890 *
1891 * ptr: Host pointer to look up
1892 * round_offset: If true round the result offset down to a page boundary
1894 * *offset: set to result offset within the RAMBlock
1895 *
1896 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001897 *
1898 * By the time this function returns, the returned pointer is not protected
1899 * by RCU anymore. If the caller is not within an RCU critical section and
1900 * does not hold the iothread lock, it must have other means of protecting the
1901 * pointer, such as a reference to the region that includes the incoming
1902 * ram_addr_t.
1903 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001904RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001905 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001906{
pbrook94a6b542009-04-11 17:15:54 +00001907 RAMBlock *block;
1908 uint8_t *host = ptr;
1909
Jan Kiszka868bb332011-06-21 22:59:09 +02001910 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001911 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001912 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001913 ram_addr = xen_ram_addr_from_mapcache(ptr);
1914 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001915 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001916 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001917 }
Mike Day0dc3f442013-09-05 14:41:35 -04001918 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001919 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001920 }
1921
Mike Day0dc3f442013-09-05 14:41:35 -04001922 rcu_read_lock();
1923 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001924 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001925 goto found;
1926 }
1927
Mike Day0dc3f442013-09-05 14:41:35 -04001928 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001929 /* This case happens when the block is not mapped. */
1930 if (block->host == NULL) {
1931 continue;
1932 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001933 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001934 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001935 }
pbrook94a6b542009-04-11 17:15:54 +00001936 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001937
Mike Day0dc3f442013-09-05 14:41:35 -04001938 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001939 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001940
1941found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001942 *offset = (host - block->host);
1943 if (round_offset) {
1944 *offset &= TARGET_PAGE_MASK;
1945 }
Mike Day0dc3f442013-09-05 14:41:35 -04001946 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001947 return block;
1948}
1949
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001950/*
1951 * Finds the named RAMBlock
1952 *
1953 * name: The name of RAMBlock to find
1954 *
1955 * Returns: RAMBlock (or NULL if not found)
1956 */
1957RAMBlock *qemu_ram_block_by_name(const char *name)
1958{
1959 RAMBlock *block;
1960
1961 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1962 if (!strcmp(name, block->idstr)) {
1963 return block;
1964 }
1965 }
1966
1967 return NULL;
1968}
1969
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001970/* Some of the softmmu routines need to translate from a host pointer
1971 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001972ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001973{
1974 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001975 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001976
Paolo Bonzinif615f392016-05-26 10:07:50 +02001977 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001978 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001979 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001980 }
1981
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001982 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001983}
Alex Williamsonf471a172010-06-11 11:11:42 -06001984
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001985/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001986static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001987 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001988{
Juan Quintela52159192013-10-08 12:44:04 +02001989 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001990 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001991 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001992 switch (size) {
1993 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001994 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001995 break;
1996 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001997 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001998 break;
1999 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002000 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002001 break;
2002 default:
2003 abort();
2004 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002005 /* Set both VGA and migration bits for simplicity and to remove
2006 * the notdirty callback faster.
2007 */
2008 cpu_physical_memory_set_dirty_range(ram_addr, size,
2009 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002010 /* we remove the notdirty callback only if the code has been
2011 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002012 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002013 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002014 }
bellard1ccde1c2004-02-06 19:46:14 +00002015}
2016
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002017static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2018 unsigned size, bool is_write)
2019{
2020 return is_write;
2021}
2022
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002023static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002024 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002025 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002026 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002027};
2028
pbrook0f459d12008-06-09 00:20:13 +00002029/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002030static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002031{
Andreas Färber93afead2013-08-26 03:41:01 +02002032 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002033 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002034 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002035 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002036 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002037 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002038 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002039
Andreas Färberff4700b2013-08-26 18:23:18 +02002040 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002041 /* We re-entered the check after replacing the TB. Now raise
2042 * the debug interrupt so that it will trigger after the
2043 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002044 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002045 return;
2046 }
Andreas Färber93afead2013-08-26 03:41:01 +02002047 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002048 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002049 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2050 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002051 if (flags == BP_MEM_READ) {
2052 wp->flags |= BP_WATCHPOINT_HIT_READ;
2053 } else {
2054 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2055 }
2056 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002057 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002058 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002059 if (wp->flags & BP_CPU &&
2060 !cc->debug_check_watchpoint(cpu, wp)) {
2061 wp->flags &= ~BP_WATCHPOINT_HIT;
2062 continue;
2063 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002064 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002065 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002066 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002067 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002068 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002069 } else {
2070 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002071 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002072 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002073 }
aliguori06d55cc2008-11-18 20:24:06 +00002074 }
aliguori6e140f22008-11-18 20:37:55 +00002075 } else {
2076 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002077 }
2078 }
2079}
2080
pbrook6658ffb2007-03-16 23:58:11 +00002081/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2082 so these check for a hit then pass through to the normal out-of-line
2083 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002084static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2085 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002086{
Peter Maydell66b9b432015-04-26 16:49:24 +01002087 MemTxResult res;
2088 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002089 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2090 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002091
Peter Maydell66b9b432015-04-26 16:49:24 +01002092 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002093 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002094 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002095 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002096 break;
2097 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002098 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002099 break;
2100 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002101 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002102 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002103 default: abort();
2104 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002105 *pdata = data;
2106 return res;
2107}
2108
2109static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2110 uint64_t val, unsigned size,
2111 MemTxAttrs attrs)
2112{
2113 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002114 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2115 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002116
2117 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2118 switch (size) {
2119 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002120 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002121 break;
2122 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002123 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002124 break;
2125 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002126 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002127 break;
2128 default: abort();
2129 }
2130 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002131}
2132
Avi Kivity1ec9b902012-01-02 12:47:48 +02002133static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002134 .read_with_attrs = watch_mem_read,
2135 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002136 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002137};
pbrook6658ffb2007-03-16 23:58:11 +00002138
Peter Maydellf25a49e2015-04-26 16:49:24 +01002139static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2140 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002141{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002142 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002143 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002144 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002145
blueswir1db7b5422007-05-26 17:36:03 +00002146#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002147 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002148 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002149#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002150 res = address_space_read(subpage->as, addr + subpage->base,
2151 attrs, buf, len);
2152 if (res) {
2153 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002154 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002155 switch (len) {
2156 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002157 *data = ldub_p(buf);
2158 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002159 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002160 *data = lduw_p(buf);
2161 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002162 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002163 *data = ldl_p(buf);
2164 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002165 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002166 *data = ldq_p(buf);
2167 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002168 default:
2169 abort();
2170 }
blueswir1db7b5422007-05-26 17:36:03 +00002171}
2172
Peter Maydellf25a49e2015-04-26 16:49:24 +01002173static MemTxResult subpage_write(void *opaque, hwaddr addr,
2174 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002175{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002176 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002177 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002178
blueswir1db7b5422007-05-26 17:36:03 +00002179#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002180 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002181 " value %"PRIx64"\n",
2182 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002183#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002184 switch (len) {
2185 case 1:
2186 stb_p(buf, value);
2187 break;
2188 case 2:
2189 stw_p(buf, value);
2190 break;
2191 case 4:
2192 stl_p(buf, value);
2193 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002194 case 8:
2195 stq_p(buf, value);
2196 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002197 default:
2198 abort();
2199 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002200 return address_space_write(subpage->as, addr + subpage->base,
2201 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002202}
2203
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002204static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002205 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002206{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002207 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002208#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002209 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002210 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002211#endif
2212
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002213 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002214 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002215}
2216
Avi Kivity70c68e42012-01-02 12:32:48 +02002217static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002218 .read_with_attrs = subpage_read,
2219 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002220 .impl.min_access_size = 1,
2221 .impl.max_access_size = 8,
2222 .valid.min_access_size = 1,
2223 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002224 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002225 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002226};
2227
Anthony Liguoric227f092009-10-01 16:12:16 -05002228static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002229 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002230{
2231 int idx, eidx;
2232
2233 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2234 return -1;
2235 idx = SUBPAGE_IDX(start);
2236 eidx = SUBPAGE_IDX(end);
2237#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002238 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2239 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002240#endif
blueswir1db7b5422007-05-26 17:36:03 +00002241 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002242 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002243 }
2244
2245 return 0;
2246}
2247
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002248static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002249{
Anthony Liguoric227f092009-10-01 16:12:16 -05002250 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002251
Vijaya Kumar K2615fab2016-10-24 16:26:49 +01002252 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002253 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002254 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002255 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002256 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002257 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002258#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002259 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2260 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002261#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002262 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002263
2264 return mmio;
2265}
2266
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002267static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2268 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002269{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002270 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002271 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002272 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002273 .mr = mr,
2274 .offset_within_address_space = 0,
2275 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002276 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002277 };
2278
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002279 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002280}
2281
Peter Maydella54c87b2016-01-21 14:15:05 +00002282MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002283{
Peter Maydella54c87b2016-01-21 14:15:05 +00002284 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2285 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002286 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002287 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002288
2289 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002290}
2291
Avi Kivitye9179ce2009-06-14 11:38:52 +03002292static void io_mem_init(void)
2293{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002294 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002295 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002296 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002297 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002298 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002299 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002300 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002301}
2302
Avi Kivityac1970f2012-10-03 16:22:53 +02002303static void mem_begin(MemoryListener *listener)
2304{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002305 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002306 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2307 uint16_t n;
2308
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002309 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002310 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002311 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002312 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002313 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002314 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002315 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002316 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002317
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002318 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002319 d->as = as;
2320 as->next_dispatch = d;
2321}
2322
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002323static void address_space_dispatch_free(AddressSpaceDispatch *d)
2324{
2325 phys_sections_free(&d->map);
2326 g_free(d);
2327}
2328
Paolo Bonzini00752702013-05-29 12:13:54 +02002329static void mem_commit(MemoryListener *listener)
2330{
2331 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002332 AddressSpaceDispatch *cur = as->dispatch;
2333 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002334
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002335 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002336
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002337 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002338 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002339 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002340 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002341}
2342
Avi Kivity1d711482012-10-02 18:54:45 +02002343static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002344{
Peter Maydell32857f42015-10-01 15:29:50 +01002345 CPUAddressSpace *cpuas;
2346 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002347
2348 /* since each CPU stores ram addresses in its TLB cache, we must
2349 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002350 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2351 cpu_reloading_memory_map();
2352 /* The CPU and TLB are protected by the iothread lock.
2353 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2354 * may have split the RCU critical section.
2355 */
2356 d = atomic_rcu_read(&cpuas->as->dispatch);
2357 cpuas->memory_dispatch = d;
2358 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002359}
2360
Avi Kivityac1970f2012-10-03 16:22:53 +02002361void address_space_init_dispatch(AddressSpace *as)
2362{
Paolo Bonzini00752702013-05-29 12:13:54 +02002363 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002364 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002365 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002366 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002367 .region_add = mem_add,
2368 .region_nop = mem_add,
2369 .priority = 0,
2370 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002371 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002372}
2373
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002374void address_space_unregister(AddressSpace *as)
2375{
2376 memory_listener_unregister(&as->dispatch_listener);
2377}
2378
Avi Kivity83f3c252012-10-07 12:59:55 +02002379void address_space_destroy_dispatch(AddressSpace *as)
2380{
2381 AddressSpaceDispatch *d = as->dispatch;
2382
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002383 atomic_rcu_set(&as->dispatch, NULL);
2384 if (d) {
2385 call_rcu(d, address_space_dispatch_free, rcu);
2386 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002387}
2388
Avi Kivity62152b82011-07-26 14:26:14 +03002389static void memory_map_init(void)
2390{
Anthony Liguori7267c092011-08-20 22:09:37 -05002391 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002392
Paolo Bonzini57271d62013-11-07 17:14:37 +01002393 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002394 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002395
Anthony Liguori7267c092011-08-20 22:09:37 -05002396 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002397 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2398 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002399 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002400}
2401
2402MemoryRegion *get_system_memory(void)
2403{
2404 return system_memory;
2405}
2406
Avi Kivity309cb472011-08-08 16:09:03 +03002407MemoryRegion *get_system_io(void)
2408{
2409 return system_io;
2410}
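
/* Usage sketch (hypothetical; region name, size and base address invented):
 * a board or machine model would typically obtain the root region created
 * above via get_system_memory() and map its own RAM into it.
 */
#if 0
static void example_board_init_ram(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* Allocate 128 MiB of guest RAM and map it at guest physical address 0. */
    memory_region_init_ram(ram, NULL, "example.ram", 128 * 1024 * 1024,
                           &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif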
2411
pbrooke2eef172008-06-08 01:09:01 +00002412#endif /* !defined(CONFIG_USER_ONLY) */
2413
bellard13eb76e2004-01-24 15:23:36 +00002414/* physical memory access (slow version, mainly for debug) */
2415#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002416int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002417 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002418{
2419 int l, flags;
2420 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002421 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002422
2423 while (len > 0) {
2424 page = addr & TARGET_PAGE_MASK;
2425 l = (page + TARGET_PAGE_SIZE) - addr;
2426 if (l > len)
2427 l = len;
2428 flags = page_get_flags(page);
2429 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002430 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002431 if (is_write) {
2432 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002433 return -1;
bellard579a97f2007-11-11 14:26:47 +00002434 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002435 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002436 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002437 memcpy(p, buf, l);
2438 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002439 } else {
2440 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002441 return -1;
bellard579a97f2007-11-11 14:26:47 +00002442 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002443 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002444 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002445 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002446 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002447 }
2448 len -= l;
2449 buf += l;
2450 addr += l;
2451 }
Paul Brooka68fe892010-03-01 00:08:59 +00002452 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002453}
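
/* Usage sketch (hypothetical caller, e.g. a debugger stub; names invented):
 * read guest memory through the user-mode accessor above and report whether
 * the whole range was accessible.
 */
#if 0
static bool example_debug_peek(CPUState *cpu, target_ulong guest_addr,
                               uint8_t *out, int len)
{
    /* cpu_memory_rw_debug() returns 0 on success, -1 on an unmapped page. */
    return cpu_memory_rw_debug(cpu, guest_addr, out, len, 0) == 0;
}
#endif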
bellard8df1cd02005-01-28 22:37:22 +00002454
bellard13eb76e2004-01-24 15:23:36 +00002455#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002456
Paolo Bonzini845b6212015-03-23 11:45:53 +01002457static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002458 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002459{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002460 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002461 addr += memory_region_get_ram_addr(mr);
2462
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002463 /* No early return if dirty_log_mask is or becomes 0, because
2464 * cpu_physical_memory_set_dirty_range will still call
2465 * xen_modified_memory.
2466 */
2467 if (dirty_log_mask) {
2468 dirty_log_mask =
2469 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002470 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002471 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2472 tb_invalidate_phys_range(addr, addr + length);
2473 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2474 }
2475 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002476}
2477
Richard Henderson23326162013-07-08 14:55:59 -07002478static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002479{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002480 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002481
2482 /* Regions are assumed to support 1-4 byte accesses unless
2483 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002484 if (access_size_max == 0) {
2485 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002486 }
Richard Henderson23326162013-07-08 14:55:59 -07002487
2488 /* Bound the maximum access by the alignment of the address. */
2489 if (!mr->ops->impl.unaligned) {
2490 unsigned align_size_max = addr & -addr;
2491 if (align_size_max != 0 && align_size_max < access_size_max) {
2492 access_size_max = align_size_max;
2493 }
2494 }
2495
2496 /* Don't attempt accesses larger than the maximum. */
2497 if (l > access_size_max) {
2498 l = access_size_max;
2499 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002500 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002501
2502 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002503}
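
/* Worked example for memory_access_size() (values invented): with
 * valid.max_access_size = 8, impl.unaligned clear, l = 8 and addr = 0x1006,
 * the alignment bound addr & -addr is 2, so the access is clamped to 2 bytes.
 * A dispatch loop such as address_space_write_continue() therefore splits an
 * 8-byte access at 0x1006 into 2-, 4- and 2-byte pieces on successive
 * iterations.
 */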
2504
Jan Kiszka4840f102015-06-18 18:47:22 +02002505static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002506{
Jan Kiszka4840f102015-06-18 18:47:22 +02002507 bool unlocked = !qemu_mutex_iothread_locked();
2508 bool release_lock = false;
2509
2510 if (unlocked && mr->global_locking) {
2511 qemu_mutex_lock_iothread();
2512 unlocked = false;
2513 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002514 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002515 if (mr->flush_coalesced_mmio) {
2516 if (unlocked) {
2517 qemu_mutex_lock_iothread();
2518 }
2519 qemu_flush_coalesced_mmio_buffer();
2520 if (unlocked) {
2521 qemu_mutex_unlock_iothread();
2522 }
2523 }
2524
2525 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002526}
2527
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002528/* Called within RCU critical section. */
2529static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2530 MemTxAttrs attrs,
2531 const uint8_t *buf,
2532 int len, hwaddr addr1,
2533 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002534{
bellard13eb76e2004-01-24 15:23:36 +00002535 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002536 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002537 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002538 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002539
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002540 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002541 if (!memory_access_is_direct(mr, true)) {
2542 release_lock |= prepare_mmio_access(mr);
2543 l = memory_access_size(mr, l, addr1);
2544 /* XXX: could force current_cpu to NULL to avoid
2545 potential bugs */
2546 switch (l) {
2547 case 8:
2548 /* 64 bit write access */
2549 val = ldq_p(buf);
2550 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2551 attrs);
2552 break;
2553 case 4:
2554 /* 32 bit write access */
2555 val = ldl_p(buf);
2556 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2557 attrs);
2558 break;
2559 case 2:
2560 /* 16 bit write access */
2561 val = lduw_p(buf);
2562 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2563 attrs);
2564 break;
2565 case 1:
2566 /* 8 bit write access */
2567 val = ldub_p(buf);
2568 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2569 attrs);
2570 break;
2571 default:
2572 abort();
bellard13eb76e2004-01-24 15:23:36 +00002573 }
2574 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002575 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002576 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002577 memcpy(ptr, buf, l);
2578 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002579 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002580
2581 if (release_lock) {
2582 qemu_mutex_unlock_iothread();
2583 release_lock = false;
2584 }
2585
bellard13eb76e2004-01-24 15:23:36 +00002586 len -= l;
2587 buf += l;
2588 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002589
2590 if (!len) {
2591 break;
2592 }
2593
2594 l = len;
2595 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002596 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002597
Peter Maydell3b643492015-04-26 16:49:23 +01002598 return result;
bellard13eb76e2004-01-24 15:23:36 +00002599}
bellard8df1cd02005-01-28 22:37:22 +00002600
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002601MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2602 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002603{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002604 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002605 hwaddr addr1;
2606 MemoryRegion *mr;
2607 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002608
2609 if (len > 0) {
2610 rcu_read_lock();
2611 l = len;
2612 mr = address_space_translate(as, addr, &addr1, &l, true);
2613 result = address_space_write_continue(as, addr, attrs, buf, len,
2614 addr1, l, mr);
2615 rcu_read_unlock();
2616 }
2617
2618 return result;
2619}
2620
2621/* Called within RCU critical section. */
2622MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2623 MemTxAttrs attrs, uint8_t *buf,
2624 int len, hwaddr addr1, hwaddr l,
2625 MemoryRegion *mr)
2626{
2627 uint8_t *ptr;
2628 uint64_t val;
2629 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002630 bool release_lock = false;
2631
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002632 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002633 if (!memory_access_is_direct(mr, false)) {
2634 /* I/O case */
2635 release_lock |= prepare_mmio_access(mr);
2636 l = memory_access_size(mr, l, addr1);
2637 switch (l) {
2638 case 8:
2639 /* 64 bit read access */
2640 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2641 attrs);
2642 stq_p(buf, val);
2643 break;
2644 case 4:
2645 /* 32 bit read access */
2646 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2647 attrs);
2648 stl_p(buf, val);
2649 break;
2650 case 2:
2651 /* 16 bit read access */
2652 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2653 attrs);
2654 stw_p(buf, val);
2655 break;
2656 case 1:
2657 /* 8 bit read access */
2658 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2659 attrs);
2660 stb_p(buf, val);
2661 break;
2662 default:
2663 abort();
2664 }
2665 } else {
2666 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002667 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002668 memcpy(buf, ptr, l);
2669 }
2670
2671 if (release_lock) {
2672 qemu_mutex_unlock_iothread();
2673 release_lock = false;
2674 }
2675
2676 len -= l;
2677 buf += l;
2678 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002679
2680 if (!len) {
2681 break;
2682 }
2683
2684 l = len;
2685 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002686 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002687
2688 return result;
2689}
2690
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002691MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2692 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002693{
2694 hwaddr l;
2695 hwaddr addr1;
2696 MemoryRegion *mr;
2697 MemTxResult result = MEMTX_OK;
2698
2699 if (len > 0) {
2700 rcu_read_lock();
2701 l = len;
2702 mr = address_space_translate(as, addr, &addr1, &l, false);
2703 result = address_space_read_continue(as, addr, attrs, buf, len,
2704 addr1, l, mr);
2705 rcu_read_unlock();
2706 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002707
2708 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002709}
2710
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002711MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2712 uint8_t *buf, int len, bool is_write)
2713{
2714 if (is_write) {
2715 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2716 } else {
2717 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2718 }
2719}
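
/* Usage sketch (hypothetical device code; names invented): a DMA-style read
 * through an address space with explicit attributes, checking the MemTxResult
 * rather than discarding it.
 */
#if 0
static bool example_dma_read(AddressSpace *as, hwaddr guest_addr,
                             uint8_t *host_buf, int len)
{
    MemTxResult res = address_space_rw(as, guest_addr, MEMTXATTRS_UNSPECIFIED,
                                       host_buf, len, false);

    return res == MEMTX_OK;
}
#endif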
Avi Kivityac1970f2012-10-03 16:22:53 +02002720
Avi Kivitya8170e52012-10-23 12:30:10 +02002721void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002722 int len, int is_write)
2723{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002724 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2725 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002726}
2727
Alexander Graf582b55a2013-12-11 14:17:44 +01002728enum write_rom_type {
2729 WRITE_DATA,
2730 FLUSH_CACHE,
2731};
2732
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002733static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002734 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002735{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002736 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002737 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002738 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002739 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002740
Paolo Bonzini41063e12015-03-18 14:21:43 +01002741 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002742 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002743 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002744 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002745
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002746 if (!(memory_region_is_ram(mr) ||
2747 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002748 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002749 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002750 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002751 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002752 switch (type) {
2753 case WRITE_DATA:
2754 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002755 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002756 break;
2757 case FLUSH_CACHE:
2758 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2759 break;
2760 }
bellardd0ecd2a2006-04-23 17:14:48 +00002761 }
2762 len -= l;
2763 buf += l;
2764 addr += l;
2765 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002766 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002767}
2768
Alexander Graf582b55a2013-12-11 14:17:44 +01002769/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002770void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002771 const uint8_t *buf, int len)
2772{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002773 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002774}
2775
2776void cpu_flush_icache_range(hwaddr start, int len)
2777{
2778 /*
2779 * This function should do the same thing as an icache flush that was
2780 * triggered from within the guest. For TCG we are always cache coherent,
2781 * so there is no need to flush anything. For KVM / Xen we need to flush
2782 * the host's instruction cache at least.
2783 */
2784 if (tcg_enabled()) {
2785 return;
2786 }
2787
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002788 cpu_physical_memory_write_rom_internal(&address_space_memory,
2789 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002790}
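
/* Usage sketch (hypothetical loader; destination address and buffer are
 * placeholders): after copying executable code into guest memory, flush the
 * corresponding range so KVM/Xen hosts see the new instructions.
 */
#if 0
static void example_load_code(AddressSpace *as, hwaddr dest,
                              const uint8_t *code, int size)
{
    cpu_physical_memory_write_rom(as, dest, code, size);
    cpu_flush_icache_range(dest, size);
}
#endif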
2791
aliguori6d16c2f2009-01-22 16:59:11 +00002792typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002793 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002794 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002795 hwaddr addr;
2796 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002797 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002798} BounceBuffer;
2799
2800static BounceBuffer bounce;
2801
aliguoriba223c22009-01-22 16:59:16 +00002802typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002803 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002804 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002805} MapClient;
2806
Fam Zheng38e047b2015-03-16 17:03:35 +08002807QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002808static QLIST_HEAD(map_client_list, MapClient) map_client_list
2809 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002810
Fam Zhenge95205e2015-03-16 17:03:37 +08002811static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002812{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002813 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002814 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002815}
2816
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002817static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002818{
2819 MapClient *client;
2820
Blue Swirl72cf2d42009-09-12 07:36:22 +00002821 while (!QLIST_EMPTY(&map_client_list)) {
2822 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002823 qemu_bh_schedule(client->bh);
2824 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002825 }
2826}
2827
Fam Zhenge95205e2015-03-16 17:03:37 +08002828void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002829{
2830 MapClient *client = g_malloc(sizeof(*client));
2831
Fam Zheng38e047b2015-03-16 17:03:35 +08002832 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002833 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002834 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002835 if (!atomic_read(&bounce.in_use)) {
2836 cpu_notify_map_clients_locked();
2837 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002838 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002839}
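
/* Usage sketch (hypothetical device model; state structure and callback
 * invented): when address_space_map() returns NULL because the single bounce
 * buffer is in use, register a bottom half so the mapping can be retried once
 * the buffer is released.
 */
#if 0
typedef struct ExampleDMAState {
    QEMUBH *retry_bh;
} ExampleDMAState;

static void example_retry_dma(void *opaque)
{
    /* Re-issue the failed address_space_map() call from here. */
}

static void example_start_dma(ExampleDMAState *s, AddressSpace *as,
                              hwaddr addr, hwaddr *plen)
{
    void *p = address_space_map(as, addr, plen, true);

    if (!p) {
        s->retry_bh = qemu_bh_new(example_retry_dma, s);
        cpu_register_map_client(s->retry_bh);
        return;
    }
    /* ... use the mapping, then address_space_unmap() it ... */
}
#endif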
2840
Fam Zheng38e047b2015-03-16 17:03:35 +08002841void cpu_exec_init_all(void)
2842{
2843 qemu_mutex_init(&ram_list.mutex);
Peter Maydell20bccb82016-10-24 16:26:49 +01002844 /* The data structures we set up here depend on knowing the page size,
2845 * so no more changes can be made after this point.
2846 * In an ideal world, nothing we did before we had finished the
2847 * machine setup would care about the target page size, and we could
2848 * do this much later, rather than requiring board models to state
2849 * up front what their requirements are.
2850 */
2851 finalize_target_page_bits();
Fam Zheng38e047b2015-03-16 17:03:35 +08002852 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002853 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002854 qemu_mutex_init(&map_client_list_lock);
2855}
2856
Fam Zhenge95205e2015-03-16 17:03:37 +08002857void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002858{
Fam Zhenge95205e2015-03-16 17:03:37 +08002859 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002860
Fam Zhenge95205e2015-03-16 17:03:37 +08002861 qemu_mutex_lock(&map_client_list_lock);
2862 QLIST_FOREACH(client, &map_client_list, link) {
2863 if (client->bh == bh) {
2864 cpu_unregister_map_client_do(client);
2865 break;
2866 }
2867 }
2868 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002869}
2870
2871static void cpu_notify_map_clients(void)
2872{
Fam Zheng38e047b2015-03-16 17:03:35 +08002873 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002874 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002875 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002876}
2877
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002878bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2879{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002880 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002881 hwaddr l, xlat;
2882
Paolo Bonzini41063e12015-03-18 14:21:43 +01002883 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002884 while (len > 0) {
2885 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002886 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2887 if (!memory_access_is_direct(mr, is_write)) {
2888 l = memory_access_size(mr, l, addr);
2889 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002890 return false;
2891 }
2892 }
2893
2894 len -= l;
2895 addr += l;
2896 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002897 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002898 return true;
2899}
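
/* Usage sketch (hypothetical; names invented): validate a guest-supplied
 * buffer before touching it, so a malformed request can be failed cleanly
 * instead of faulting part-way through.
 */
#if 0
static MemTxResult example_checked_write(AddressSpace *as, hwaddr addr,
                                         const uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, addr, len, true)) {
        return MEMTX_DECODE_ERROR;
    }
    return address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, buf, len);
}
#endif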
2900
aliguori6d16c2f2009-01-22 16:59:11 +00002901/* Map a physical memory region into a host virtual address.
2902 * May map a subset of the requested range, given by and returned in *plen.
2903 * May return NULL if resources needed to perform the mapping are exhausted.
2904 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002905 * Use cpu_register_map_client() to know when retrying the map operation is
2906 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002907 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002908void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002909 hwaddr addr,
2910 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002911 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002912{
Avi Kivitya8170e52012-10-23 12:30:10 +02002913 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002914 hwaddr done = 0;
2915 hwaddr l, xlat, base;
2916 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002917 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002918
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002919 if (len == 0) {
2920 return NULL;
2921 }
aliguori6d16c2f2009-01-22 16:59:11 +00002922
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002923 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002924 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002925 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002926
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002927 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002928 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002929 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002930 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002931 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002932 /* Avoid unbounded allocations */
2933 l = MIN(l, TARGET_PAGE_SIZE);
2934 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002935 bounce.addr = addr;
2936 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002937
2938 memory_region_ref(mr);
2939 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002940 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002941 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2942 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002943 }
aliguori6d16c2f2009-01-22 16:59:11 +00002944
Paolo Bonzini41063e12015-03-18 14:21:43 +01002945 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002946 *plen = l;
2947 return bounce.buffer;
2948 }
2949
2950 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002951
2952 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002953 len -= l;
2954 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002955 done += l;
2956 if (len == 0) {
2957 break;
2958 }
2959
2960 l = len;
2961 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2962 if (this_mr != mr || xlat != base + done) {
2963 break;
2964 }
aliguori6d16c2f2009-01-22 16:59:11 +00002965 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002966
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002967 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002968 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002969 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002970 rcu_read_unlock();
2971
2972 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002973}
2974
Avi Kivityac1970f2012-10-03 16:22:53 +02002975/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002976 * Will also mark the memory as dirty if is_write == 1. access_len gives
2977 * the amount of memory that was actually read or written by the caller.
2978 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002979void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2980 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002981{
2982 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002983 MemoryRegion *mr;
2984 ram_addr_t addr1;
2985
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002986 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002987 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002988 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002989 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002990 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002991 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002992 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002993 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002994 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002995 return;
2996 }
2997 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002998 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2999 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003000 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003001 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003002 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003003 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003004 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003005 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003006}
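
/* Usage sketch (hypothetical; names invented): the usual map/access/unmap
 * cycle.  If the region cannot be mapped directly (NULL return, or *plen
 * comes back shorter than requested), fall back to address_space_write().
 */
#if 0
static void example_fill_guest_buffer(AddressSpace *as, hwaddr addr,
                                      const uint8_t *src, int len)
{
    hwaddr mapped = len;
    void *p = address_space_map(as, addr, &mapped, true);

    if (p && mapped == len) {
        memcpy(p, src, len);
        address_space_unmap(as, p, mapped, true, mapped);
    } else {
        if (p) {
            address_space_unmap(as, p, mapped, true, 0);
        }
        address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, src, len);
    }
}
#endif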
bellardd0ecd2a2006-04-23 17:14:48 +00003007
Avi Kivitya8170e52012-10-23 12:30:10 +02003008void *cpu_physical_memory_map(hwaddr addr,
3009 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003010 int is_write)
3011{
3012 return address_space_map(&address_space_memory, addr, plen, is_write);
3013}
3014
Avi Kivitya8170e52012-10-23 12:30:10 +02003015void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3016 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003017{
3018 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3019}
3020
bellard8df1cd02005-01-28 22:37:22 +00003021/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003022static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3023 MemTxAttrs attrs,
3024 MemTxResult *result,
3025 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003026{
bellard8df1cd02005-01-28 22:37:22 +00003027 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003028 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003029 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003030 hwaddr l = 4;
3031 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003032 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003033 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003034
Paolo Bonzini41063e12015-03-18 14:21:43 +01003035 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003036 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003037 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003038 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003039
bellard8df1cd02005-01-28 22:37:22 +00003040 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003041 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003042#if defined(TARGET_WORDS_BIGENDIAN)
3043 if (endian == DEVICE_LITTLE_ENDIAN) {
3044 val = bswap32(val);
3045 }
3046#else
3047 if (endian == DEVICE_BIG_ENDIAN) {
3048 val = bswap32(val);
3049 }
3050#endif
bellard8df1cd02005-01-28 22:37:22 +00003051 } else {
3052 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003053 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003054 switch (endian) {
3055 case DEVICE_LITTLE_ENDIAN:
3056 val = ldl_le_p(ptr);
3057 break;
3058 case DEVICE_BIG_ENDIAN:
3059 val = ldl_be_p(ptr);
3060 break;
3061 default:
3062 val = ldl_p(ptr);
3063 break;
3064 }
Peter Maydell50013112015-04-26 16:49:24 +01003065 r = MEMTX_OK;
3066 }
3067 if (result) {
3068 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003069 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003070 if (release_lock) {
3071 qemu_mutex_unlock_iothread();
3072 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003073 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003074 return val;
3075}
3076
Peter Maydell50013112015-04-26 16:49:24 +01003077uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3078 MemTxAttrs attrs, MemTxResult *result)
3079{
3080 return address_space_ldl_internal(as, addr, attrs, result,
3081 DEVICE_NATIVE_ENDIAN);
3082}
3083
3084uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3085 MemTxAttrs attrs, MemTxResult *result)
3086{
3087 return address_space_ldl_internal(as, addr, attrs, result,
3088 DEVICE_LITTLE_ENDIAN);
3089}
3090
3091uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3092 MemTxAttrs attrs, MemTxResult *result)
3093{
3094 return address_space_ldl_internal(as, addr, attrs, result,
3095 DEVICE_BIG_ENDIAN);
3096}
3097
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003098uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003099{
Peter Maydell50013112015-04-26 16:49:24 +01003100 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003101}
3102
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003103uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003104{
Peter Maydell50013112015-04-26 16:49:24 +01003105 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003106}
3107
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003108uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003109{
Peter Maydell50013112015-04-26 16:49:24 +01003110 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003111}
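
/* Usage sketch (hypothetical; names invented): the ldl_phys() style wrappers
 * above discard the MemTxResult; a caller that needs to know whether the read
 * actually succeeded can use address_space_ldl() directly.
 */
#if 0
static bool example_read_u32(AddressSpace *as, hwaddr addr, uint32_t *out)
{
    MemTxResult res;
    uint32_t val = address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return false;
    }
    *out = val;
    return true;
}
#endif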
3112
bellard84b7b8e2005-11-28 21:19:04 +00003113/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003114static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3115 MemTxAttrs attrs,
3116 MemTxResult *result,
3117 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003118{
bellard84b7b8e2005-11-28 21:19:04 +00003119 uint8_t *ptr;
3120 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003121 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003122 hwaddr l = 8;
3123 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003124 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003125 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003126
Paolo Bonzini41063e12015-03-18 14:21:43 +01003127 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003128 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003129 false);
3130 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003131 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003132
bellard84b7b8e2005-11-28 21:19:04 +00003133 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003134 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003135#if defined(TARGET_WORDS_BIGENDIAN)
3136 if (endian == DEVICE_LITTLE_ENDIAN) {
3137 val = bswap64(val);
3138 }
3139#else
3140 if (endian == DEVICE_BIG_ENDIAN) {
3141 val = bswap64(val);
3142 }
3143#endif
bellard84b7b8e2005-11-28 21:19:04 +00003144 } else {
3145 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003146 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003147 switch (endian) {
3148 case DEVICE_LITTLE_ENDIAN:
3149 val = ldq_le_p(ptr);
3150 break;
3151 case DEVICE_BIG_ENDIAN:
3152 val = ldq_be_p(ptr);
3153 break;
3154 default:
3155 val = ldq_p(ptr);
3156 break;
3157 }
Peter Maydell50013112015-04-26 16:49:24 +01003158 r = MEMTX_OK;
3159 }
3160 if (result) {
3161 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003162 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003163 if (release_lock) {
3164 qemu_mutex_unlock_iothread();
3165 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003166 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003167 return val;
3168}
3169
Peter Maydell50013112015-04-26 16:49:24 +01003170uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3171 MemTxAttrs attrs, MemTxResult *result)
3172{
3173 return address_space_ldq_internal(as, addr, attrs, result,
3174 DEVICE_NATIVE_ENDIAN);
3175}
3176
3177uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3178 MemTxAttrs attrs, MemTxResult *result)
3179{
3180 return address_space_ldq_internal(as, addr, attrs, result,
3181 DEVICE_LITTLE_ENDIAN);
3182}
3183
3184uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3185 MemTxAttrs attrs, MemTxResult *result)
3186{
3187 return address_space_ldq_internal(as, addr, attrs, result,
3188 DEVICE_BIG_ENDIAN);
3189}
3190
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003191uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003192{
Peter Maydell50013112015-04-26 16:49:24 +01003193 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003194}
3195
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003196uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003197{
Peter Maydell50013112015-04-26 16:49:24 +01003198 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003199}
3200
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003201uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003202{
Peter Maydell50013112015-04-26 16:49:24 +01003203 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003204}
3205
bellardaab33092005-10-30 20:48:42 +00003206/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003207uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3208 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003209{
3210 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003211 MemTxResult r;
3212
3213 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3214 if (result) {
3215 *result = r;
3216 }
bellardaab33092005-10-30 20:48:42 +00003217 return val;
3218}
3219
Peter Maydell50013112015-04-26 16:49:24 +01003220uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3221{
3222 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3223}
3224
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003225/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003226static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3227 hwaddr addr,
3228 MemTxAttrs attrs,
3229 MemTxResult *result,
3230 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003231{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003232 uint8_t *ptr;
3233 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003234 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003235 hwaddr l = 2;
3236 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003237 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003238 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003239
Paolo Bonzini41063e12015-03-18 14:21:43 +01003240 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003241 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003242 false);
3243 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003244 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003245
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003246 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003247 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003248#if defined(TARGET_WORDS_BIGENDIAN)
3249 if (endian == DEVICE_LITTLE_ENDIAN) {
3250 val = bswap16(val);
3251 }
3252#else
3253 if (endian == DEVICE_BIG_ENDIAN) {
3254 val = bswap16(val);
3255 }
3256#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003257 } else {
3258 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003259 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003260 switch (endian) {
3261 case DEVICE_LITTLE_ENDIAN:
3262 val = lduw_le_p(ptr);
3263 break;
3264 case DEVICE_BIG_ENDIAN:
3265 val = lduw_be_p(ptr);
3266 break;
3267 default:
3268 val = lduw_p(ptr);
3269 break;
3270 }
Peter Maydell50013112015-04-26 16:49:24 +01003271 r = MEMTX_OK;
3272 }
3273 if (result) {
3274 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003275 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003276 if (release_lock) {
3277 qemu_mutex_unlock_iothread();
3278 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003279 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003280 return val;
bellardaab33092005-10-30 20:48:42 +00003281}
3282
Peter Maydell50013112015-04-26 16:49:24 +01003283uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3284 MemTxAttrs attrs, MemTxResult *result)
3285{
3286 return address_space_lduw_internal(as, addr, attrs, result,
3287 DEVICE_NATIVE_ENDIAN);
3288}
3289
3290uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3291 MemTxAttrs attrs, MemTxResult *result)
3292{
3293 return address_space_lduw_internal(as, addr, attrs, result,
3294 DEVICE_LITTLE_ENDIAN);
3295}
3296
3297uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3298 MemTxAttrs attrs, MemTxResult *result)
3299{
3300 return address_space_lduw_internal(as, addr, attrs, result,
3301 DEVICE_BIG_ENDIAN);
3302}
3303
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003304uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003305{
Peter Maydell50013112015-04-26 16:49:24 +01003306 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003307}
3308
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003309uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003310{
Peter Maydell50013112015-04-26 16:49:24 +01003311 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003312}
3313
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003314uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003315{
Peter Maydell50013112015-04-26 16:49:24 +01003316 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003317}
3318
bellard8df1cd02005-01-28 22:37:22 +00003319/* warning: addr must be aligned. The ram page is not marked as dirty
3320 and the code inside is not invalidated. It is useful if the dirty
3321 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003322void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3323 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003324{
bellard8df1cd02005-01-28 22:37:22 +00003325 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003326 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003327 hwaddr l = 4;
3328 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003329 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003330 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003331 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003332
Paolo Bonzini41063e12015-03-18 14:21:43 +01003333 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003334 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003335 true);
3336 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003337 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003338
Peter Maydell50013112015-04-26 16:49:24 +01003339 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003340 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003341 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003342 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003343
Paolo Bonzini845b6212015-03-23 11:45:53 +01003344 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3345 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003346 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3347 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003348 r = MEMTX_OK;
3349 }
3350 if (result) {
3351 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003352 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003353 if (release_lock) {
3354 qemu_mutex_unlock_iothread();
3355 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003356 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003357}
3358
Peter Maydell50013112015-04-26 16:49:24 +01003359void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3360{
3361 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3362}
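
/* Usage sketch (hypothetical MMU helper; PTE layout and bit value invented):
 * set an "accessed" flag in a guest page-table entry during a software
 * page-table walk.  The _notdirty variant avoids marking the page dirty and
 * invalidating translated code on the page that holds the page table.
 */
#if 0
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    pte |= 0x20;    /* hypothetical "accessed" bit */
    stl_phys_notdirty(as, pte_addr, pte);
}
#endif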
3363
bellard8df1cd02005-01-28 22:37:22 +00003364/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003365static inline void address_space_stl_internal(AddressSpace *as,
3366 hwaddr addr, uint32_t val,
3367 MemTxAttrs attrs,
3368 MemTxResult *result,
3369 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003370{
bellard8df1cd02005-01-28 22:37:22 +00003371 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003372 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003373 hwaddr l = 4;
3374 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003375 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003376 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003377
Paolo Bonzini41063e12015-03-18 14:21:43 +01003378 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003379 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003380 true);
3381 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003382 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003383
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003384#if defined(TARGET_WORDS_BIGENDIAN)
3385 if (endian == DEVICE_LITTLE_ENDIAN) {
3386 val = bswap32(val);
3387 }
3388#else
3389 if (endian == DEVICE_BIG_ENDIAN) {
3390 val = bswap32(val);
3391 }
3392#endif
Peter Maydell50013112015-04-26 16:49:24 +01003393 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003394 } else {
bellard8df1cd02005-01-28 22:37:22 +00003395 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003396 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003397 switch (endian) {
3398 case DEVICE_LITTLE_ENDIAN:
3399 stl_le_p(ptr, val);
3400 break;
3401 case DEVICE_BIG_ENDIAN:
3402 stl_be_p(ptr, val);
3403 break;
3404 default:
3405 stl_p(ptr, val);
3406 break;
3407 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003408 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003409 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003410 }
Peter Maydell50013112015-04-26 16:49:24 +01003411 if (result) {
3412 *result = r;
3413 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003414 if (release_lock) {
3415 qemu_mutex_unlock_iothread();
3416 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003417 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003418}
3419
3420void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3421 MemTxAttrs attrs, MemTxResult *result)
3422{
3423 address_space_stl_internal(as, addr, val, attrs, result,
3424 DEVICE_NATIVE_ENDIAN);
3425}
3426
3427void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3428 MemTxAttrs attrs, MemTxResult *result)
3429{
3430 address_space_stl_internal(as, addr, val, attrs, result,
3431 DEVICE_LITTLE_ENDIAN);
3432}
3433
3434void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3435 MemTxAttrs attrs, MemTxResult *result)
3436{
3437 address_space_stl_internal(as, addr, val, attrs, result,
3438 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003439}
3440
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003441void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003442{
Peter Maydell50013112015-04-26 16:49:24 +01003443 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003444}
3445
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003446void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003447{
Peter Maydell50013112015-04-26 16:49:24 +01003448 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003449}
3450
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003451void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003452{
Peter Maydell50013112015-04-26 16:49:24 +01003453 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003454}
3455
bellardaab33092005-10-30 20:48:42 +00003456/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003457void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3458 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003459{
3460 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003461 MemTxResult r;
3462
3463 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3464 if (result) {
3465 *result = r;
3466 }
3467}
3468
3469void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3470{
3471 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003472}
3473
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003474/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003475static inline void address_space_stw_internal(AddressSpace *as,
3476 hwaddr addr, uint32_t val,
3477 MemTxAttrs attrs,
3478 MemTxResult *result,
3479 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003480{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003481 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003482 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003483 hwaddr l = 2;
3484 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003485 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003486 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003487
Paolo Bonzini41063e12015-03-18 14:21:43 +01003488 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003489 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003490 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003491 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003492
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003493#if defined(TARGET_WORDS_BIGENDIAN)
3494 if (endian == DEVICE_LITTLE_ENDIAN) {
3495 val = bswap16(val);
3496 }
3497#else
3498 if (endian == DEVICE_BIG_ENDIAN) {
3499 val = bswap16(val);
3500 }
3501#endif
Peter Maydell50013112015-04-26 16:49:24 +01003502 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003503 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003504 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003505 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003506 switch (endian) {
3507 case DEVICE_LITTLE_ENDIAN:
3508 stw_le_p(ptr, val);
3509 break;
3510 case DEVICE_BIG_ENDIAN:
3511 stw_be_p(ptr, val);
3512 break;
3513 default:
3514 stw_p(ptr, val);
3515 break;
3516 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003517 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003518 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003519 }
Peter Maydell50013112015-04-26 16:49:24 +01003520 if (result) {
3521 *result = r;
3522 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003523 if (release_lock) {
3524 qemu_mutex_unlock_iothread();
3525 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003526 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003527}
3528
3529void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3530 MemTxAttrs attrs, MemTxResult *result)
3531{
3532 address_space_stw_internal(as, addr, val, attrs, result,
3533 DEVICE_NATIVE_ENDIAN);
3534}
3535
3536void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3537 MemTxAttrs attrs, MemTxResult *result)
3538{
3539 address_space_stw_internal(as, addr, val, attrs, result,
3540 DEVICE_LITTLE_ENDIAN);
3541}
3542
3543void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3544 MemTxAttrs attrs, MemTxResult *result)
3545{
3546 address_space_stw_internal(as, addr, val, attrs, result,
3547 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003548}
3549
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003550void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003551{
Peter Maydell50013112015-04-26 16:49:24 +01003552 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003553}
3554
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003555void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003556{
Peter Maydell50013112015-04-26 16:49:24 +01003557 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003558}
3559
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003560void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003561{
Peter Maydell50013112015-04-26 16:49:24 +01003562 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003563}
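/*
 * Editor's illustrative sketch, not part of upstream exec.c: the three
 * 16-bit store flavours differ only in byte order.  On the direct RAM path,
 * stw_le_phys()/stw_be_phys() always lay the value down little-/big-endian,
 * whatever TARGET_WORDS_BIGENDIAN is, while stw_phys() uses the target's
 * native order; on the MMIO path the value is byte-swapped as needed before
 * memory_region_dispatch_write().  The addresses below are made up and, as
 * the warning above says, must be 2-byte aligned.
 */
static void example_store_word(AddressSpace *as)
{
    stw_le_phys(as, 0x2000, 0x1234);    /* bytes 0x34 0x12 land in guest RAM */
    stw_be_phys(as, 0x2002, 0x1234);    /* bytes 0x12 0x34 land in guest RAM */
    stw_phys(as, 0x2004, 0x1234);       /* target-native byte order */
}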
3564
bellardaab33092005-10-30 20:48:42 +00003565/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003566void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3567 MemTxAttrs attrs, MemTxResult *result)
3568{
3569 MemTxResult r;
3570 val = tswap64(val);
3571 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3572 if (result) {
3573 *result = r;
3574 }
3575}
3576
3577void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3578 MemTxAttrs attrs, MemTxResult *result)
3579{
3580 MemTxResult r;
3581 val = cpu_to_le64(val);
3582 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3583 if (result) {
3584 *result = r;
3585 }
3586}

 3587void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3588 MemTxAttrs attrs, MemTxResult *result)
3589{
3590 MemTxResult r;
3591 val = cpu_to_be64(val);
3592 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3593 if (result) {
3594 *result = r;
3595 }
3596}
3597
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003598void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003599{
Peter Maydell50013112015-04-26 16:49:24 +01003600 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003601}
3602
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003603void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003604{
Peter Maydell50013112015-04-26 16:49:24 +01003605 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003606}
3607
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003608void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003609{
Peter Maydell50013112015-04-26 16:49:24 +01003610 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003611}
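/*
 * Editor's illustrative sketch, not part of upstream exec.c: unlike the
 * 16- and 32-bit helpers, the 64-bit stores above just byte-swap the value
 * on the stack (tswap64/cpu_to_le64/cpu_to_be64) and push it through
 * address_space_rw(), hence the "XXX: optimize" note.  The address 0x3000
 * is a made-up example.
 */
static void example_store_quad(AddressSpace *as)
{
    MemTxResult res;

    address_space_stq_le(as, 0x3000, UINT64_C(0x0123456789abcdef),
                         MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        /* handle the failed transaction */
    }
}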
3612
aliguori5e2972f2009-03-28 17:51:36 +00003613/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003614int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003615 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003616{
3617 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003618 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003619 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003620
3621 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003622 int asidx;
3623 MemTxAttrs attrs;
3624
bellard13eb76e2004-01-24 15:23:36 +00003625 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003626 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3627 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003628 /* if no physical page mapped, return an error */
3629 if (phys_addr == -1)
3630 return -1;
3631 l = (page + TARGET_PAGE_SIZE) - addr;
3632 if (l > len)
3633 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003634 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003635 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003636 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3637 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003638 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003639 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3640 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003641 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003642 }
bellard13eb76e2004-01-24 15:23:36 +00003643 len -= l;
3644 buf += l;
3645 addr += l;
3646 }
3647 return 0;
3648}
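/*
 * Editor's illustrative sketch, not part of upstream exec.c:
 * cpu_memory_rw_debug() is the entry point gdbstub-style code uses to
 * access guest *virtual* addresses.  It translates one page at a time via
 * cpu_get_phys_page_attrs_debug() and can write even to ROM.  The wrapper
 * below is hypothetical.
 */
static int example_read_guest_virt(CPUState *cpu, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    /* Returns 0 on success, -1 if any page in the range is unmapped. */
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0);
}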
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003649
3650/*
 3651 * Allows code that needs to deal with migration bitmaps etc. to still be built
 3652 * target-independent.
3653 */
3654size_t qemu_target_page_bits(void)
3655{
3656 return TARGET_PAGE_BITS;
3657}
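/*
 * Editor's illustrative sketch, not part of upstream exec.c:
 * target-independent code (e.g. migration) derives the page size from
 * qemu_target_page_bits() rather than using TARGET_PAGE_SIZE directly.
 * The helper name is made up.
 */
static size_t example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();  /* typically 1 << 12 == 4096 */
}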
3658
Paul Brooka68fe892010-03-01 00:08:59 +00003659#endif
bellard13eb76e2004-01-24 15:23:36 +00003660
Blue Swirl8e4a4242013-01-06 18:30:17 +00003661/*
3662 * A helper function for the _utterly broken_ virtio device model to find out if
3663 * it's running on a big endian machine. Don't do this at home kids!
3664 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003665bool target_words_bigendian(void);
3666bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003667{
3668#if defined(TARGET_WORDS_BIGENDIAN)
3669 return true;
3670#else
3671 return false;
3672#endif
3673}
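/*
 * Editor's illustrative sketch, not part of upstream exec.c: legacy virtio
 * code consults target_words_bigendian() to decide how to interpret
 * guest-endian data on the host.  The helper below is hypothetical.
 */
static uint16_t example_guest_to_host_u16(uint16_t guest_val)
{
    if (target_words_bigendian()) {
        return be16_to_cpu(guest_val);   /* guest data is big-endian */
    }
    return le16_to_cpu(guest_val);       /* guest data is little-endian */
}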
3674
Wen Congyang76f35532012-05-07 12:04:18 +08003675#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003676bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003677{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003678 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003679 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003680 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003681
Paolo Bonzini41063e12015-03-18 14:21:43 +01003682 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003683 mr = address_space_translate(&address_space_memory,
3684 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003685
Paolo Bonzini41063e12015-03-18 14:21:43 +01003686 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3687 rcu_read_unlock();
3688 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003689}
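/*
 * Editor's illustrative sketch, not part of upstream exec.c:
 * cpu_physical_memory_is_io() returns true when the guest physical address
 * resolves to neither RAM nor a ROM device, i.e. accessing it would go
 * through MMIO dispatch.  Dump-style callers can use it to skip such
 * addresses; the helper below is hypothetical.
 */
static bool example_is_dumpable(hwaddr paddr)
{
    /* Only plain memory is safe to copy directly from the host mapping. */
    return !cpu_physical_memory_is_io(paddr);
}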
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003690
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003691int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003692{
3693 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003694 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003695
Mike Day0dc3f442013-09-05 14:41:35 -04003696 rcu_read_lock();
3697 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003698 ret = func(block->idstr, block->host, block->offset,
3699 block->used_length, opaque);
3700 if (ret) {
3701 break;
3702 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003703 }
Mike Day0dc3f442013-09-05 14:41:35 -04003704 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003705 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003706}
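/*
 * Editor's illustrative sketch, not part of upstream exec.c: a
 * qemu_ram_foreach_block() callback receives each block's id string, host
 * pointer, offset and used length (matching the call in the loop above);
 * returning non-zero stops the walk.  The callback and wrapper below are
 * hypothetical.
 */
static int example_print_block(const char *block_name, void *host_addr,
                               ram_addr_t offset, ram_addr_t length,
                               void *opaque)
{
    printf("%s: host %p offset 0x%" PRIx64 " length 0x%" PRIx64 "\n",
           block_name, host_addr, (uint64_t)offset, (uint64_t)length);
    return 0;   /* keep iterating */
}

static void example_dump_ram_blocks(void)
{
    qemu_ram_foreach_block(example_print_block, NULL);
}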
Peter Maydellec3f8c92013-06-27 20:53:38 +01003707#endif