/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

/* Only a portion of RAM (used_length) is actually used and migrated.
 * This used_length can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}
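
/* Intended usage (a sketch; the exact call site is target-specific): a
 * target built with TARGET_PAGE_BITS_VARY is expected to call this while
 * its CPUs are being realized, before anything forces
 * finalize_target_page_bits():
 *
 *     if (!set_preferred_target_page_bits(12)) {
 *         // too late, or conflicts with an earlier CPU's page size
 *     }
 *
 * Once target_page_bits_decided is set, only requests that keep the
 * current size (bits >= target_page_bits) still succeed.
 */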

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
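
/* Worked example: with ADDR_SPACE_BITS = 64 and a 4K-page target
 * (TARGET_PAGE_BITS = 12), 52 bits of page number remain, so
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 * levels of P_L2_SIZE = 512-entry tables cover 6 * 9 = 54 >= 52 bits.
 */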

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
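
/* Note on the walk above: at each level the step covers P_L2_BITS worth
 * of index bits.  A [index, index + nb) range that is aligned to the
 * step and at least one step long is recorded directly in that level's
 * entry (skip = 0, ptr = section number); anything smaller recurses one
 * level down.  Reserving 3 * P_L2_LEVELS nodes up front is a deliberate
 * overestimate so phys_page_set_level() never reallocates mid-walk.
 */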

/* Compact a non-leaf page entry.  Simply detect that the entry has a
 * single child, and update our entry so we can skip it and go directly
 * to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
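
/* After compaction, a chain of single-child interior nodes collapses
 * into one entry whose skip field jumps several levels at once, so a
 * lookup in a map dominated by one large section touches a couple of
 * nodes instead of all P_L2_LEVELS of them.  phys_page_find() honours
 * this via the "i -= lp.skip" in its loop condition.
 */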

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}
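
/* Example: a section with offset_within_address_space = 0x1000 and
 * size = 0x2000 covers [0x1000, 0x2fff], so section_covers_addr()
 * returns true for 0x2fff and false for 0x3000.  A section whose Int128
 * size has a non-zero high word can only be the one spanning the whole
 * 2^64 address space, hence the unconditional "true" in that case.
 */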

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
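
/* mru_section is a one-entry cache in front of the radix tree walk: a
 * hit costs a single section_covers_addr() check instead of a
 * multi-level phys_page_find().  Readers run inside an RCU critical
 * section, so a pointer read via atomic_read() stays valid until the
 * dispatch it belongs to is reclaimed.
 */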

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
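
/* The loop above chases (possibly nested) IOMMUs: each pass translates
 * addr within the current address space, and if the region found is an
 * IOMMU, iommu_ops->translate() yields a new target_as/translated_addr/
 * addr_mask/perm tuple and the walk restarts there.  *plen shrinks to
 * the IOMMU page containing addr, and a permission miss ends the walk
 * at io_mem_unassigned so the access faults cleanly.
 */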

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
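
/* Example of why the "- 1" endpoints matter: for a watchpoint at
 * vaddr = 0xfffffffffffffffc with len = 4 on a 64-bit target,
 * wpend = 0xffffffffffffffff instead of the wrapped-to-zero value that
 * wp->vaddr + wp->len would give, so an access at 0xfffffffffffffffe
 * still matches.
 */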

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
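
/* Each client's dirty bitmap (e.g. the migration, VGA and code clients)
 * is carved into DIRTY_MEMORY_BLOCK_SIZE chunks so it can be grown
 * RCU-style when RAM is hotplugged.  The loop above clamps num to the
 * end of the current chunk, so each bitmap_test_and_clear_atomic() call
 * never straddles two blocks->blocks[] entries.
 */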

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
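
/* The assert above is what lets memory_region_section_get_iotlb() pack
 * a section number and flag bits such as PHYS_SECTION_NOTDIRTY into the
 * low bits of a page-aligned iotlb value: with fewer than
 * TARGET_PAGE_SIZE sections per dispatch, the index always fits below
 * the page-offset bits.
 */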

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
1163
1164
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001165static void register_multipage(AddressSpaceDispatch *d,
1166 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001167{
Avi Kivitya8170e52012-10-23 12:30:10 +02001168 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001169 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001170 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1171 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001172
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001173 assert(num_pages);
1174 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001175}
1176
Avi Kivityac1970f2012-10-03 16:22:53 +02001177static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001178{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001179 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001180 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001181 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001182 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001183
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001184 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1185 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1186 - now.offset_within_address_space;
1187
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001188 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001189 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001190 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001191 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001192 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001193 while (int128_ne(remain.size, now.size)) {
1194 remain.size = int128_sub(remain.size, now.size);
1195 remain.offset_within_address_space += int128_get64(now.size);
1196 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001197 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001198 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001199 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001200 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001201 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001202 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001203 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001204 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001205 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001206 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001207 }
1208}
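
/* Worked example (illustrative): with 4 KiB target pages, a section at
 * offset 0x1800 of size 0x4000 is carved up by the loop above into:
 *
 *     [0x1800, 0x2000)  unaligned head   -> register_subpage()
 *     [0x2000, 0x5000)  page-aligned run -> register_multipage()
 *     [0x5000, 0x5800)  short tail       -> register_subpage()
 */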
1209
Sheng Yang62a27442010-01-26 19:21:16 +08001210void qemu_flush_coalesced_mmio_buffer(void)
1211{
1212 if (kvm_enabled())
1213 kvm_flush_coalesced_mmio_buffer();
1214}
1215
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001216void qemu_mutex_lock_ramlist(void)
1217{
1218 qemu_mutex_lock(&ram_list.mutex);
1219}
1220
1221void qemu_mutex_unlock_ramlist(void)
1222{
1223 qemu_mutex_unlock(&ram_list.mutex);
1224}
1225
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001226#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001227static void *file_ram_alloc(RAMBlock *block,
1228 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001229 const char *path,
1230 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001232 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001233 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001234 char *sanitized_name;
1235 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001236 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001237 int fd = -1;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001238
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001239 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1240 error_setg(errp,
1241 "host lacks kvm mmu notifiers, -mem-path unsupported");
1242 return NULL;
1243 }
1244
1245 for (;;) {
1246 fd = open(path, O_RDWR);
1247 if (fd >= 0) {
1248 /* @path names an existing file, use it */
1249 break;
1250 }
1251 if (errno == ENOENT) {
1252 /* @path names a file that doesn't exist, create it */
1253 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1254 if (fd >= 0) {
1255 unlink_on_error = true;
1256 break;
1257 }
1258 } else if (errno == EISDIR) {
1259 /* @path names a directory, create a file there */
1260 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1261 sanitized_name = g_strdup(memory_region_name(block->mr));
1262 for (c = sanitized_name; *c != '\0'; c++) {
1263 if (*c == '/') {
1264 *c = '_';
1265 }
1266 }
1267
1268 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1269 sanitized_name);
1270 g_free(sanitized_name);
1271
1272 fd = mkstemp(filename);
1273 if (fd >= 0) {
1274 unlink(filename);
1275 g_free(filename);
1276 break;
1277 }
1278 g_free(filename);
1279 }
1280 if (errno != EEXIST && errno != EINTR) {
1281 error_setg_errno(errp, errno,
1282 "can't open backing store %s for guest RAM",
1283 path);
1284 goto error;
1285 }
1286 /*
1287 * Try again on EINTR and EEXIST. The latter happens when
1288 * something else creates the file between our two open() calls.
1289 */
1290 }
1291
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001292 block->page_size = qemu_fd_getpagesize(fd);
Haozhong Zhang83606682016-10-24 20:49:37 +08001293 block->mr->align = block->page_size;
1294#if defined(__s390x__)
1295 if (kvm_enabled()) {
1296 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1297 }
1298#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03001299
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001300 if (memory < block->page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001301 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001302 "or larger than page size 0x%zx",
1303 memory, block->page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001304 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001305 }
1306
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001307 memory = ROUND_UP(memory, block->page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001308
1309 /*
1310 * ftruncate is not supported by hugetlbfs in older
1311 * hosts, so don't bother bailing out on errors.
1312 * If anything goes wrong with it under other filesystems,
1313 * mmap will fail.
1314 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001315 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001316 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001317 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001318
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001319 area = qemu_ram_mmap(fd, memory, block->mr->align,
1320 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001321 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001322 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001323 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001324 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001325 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001326
1327 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001328 os_mem_prealloc(fd, area, memory, errp);
1329 if (errp && *errp) {
1330 goto error;
1331 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001332 }
1333
Alex Williamson04b16652010-07-02 11:13:17 -06001334 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001335 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001336
1337error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001338 if (area != MAP_FAILED) {
1339 qemu_ram_munmap(area, memory);
1340 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001341 if (unlink_on_error) {
1342 unlink(path);
1343 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001344 if (fd != -1) {
1345 close(fd);
1346 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001347 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001348}
1349#endif
1350
Mike Day0dc3f442013-09-05 14:41:35 -04001351/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001352static ram_addr_t find_ram_offset(ram_addr_t size)
1353{
Alex Williamson04b16652010-07-02 11:13:17 -06001354 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001355 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001356
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001357 assert(size != 0); /* it would hand out same offset multiple times */
1358
Mike Day0dc3f442013-09-05 14:41:35 -04001359 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001360 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001361 }
Alex Williamson04b16652010-07-02 11:13:17 -06001362
Mike Day0dc3f442013-09-05 14:41:35 -04001363 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001364 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001365
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001366 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001367
Mike Day0dc3f442013-09-05 14:41:35 -04001368 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001369 if (next_block->offset >= end) {
1370 next = MIN(next, next_block->offset);
1371 }
1372 }
1373 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001374 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001375 mingap = next - end;
1376 }
1377 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001378
1379 if (offset == RAM_ADDR_MAX) {
1380 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1381 (uint64_t)size);
1382 abort();
1383 }
1384
Alex Williamson04b16652010-07-02 11:13:17 -06001385 return offset;
1386}
1387
Juan Quintela652d7ec2012-07-20 10:37:54 +02001388ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001389{
Alex Williamsond17b5282010-06-25 11:08:38 -06001390 RAMBlock *block;
1391 ram_addr_t last = 0;
1392
Mike Day0dc3f442013-09-05 14:41:35 -04001393 rcu_read_lock();
1394 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001395 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001396 }
Mike Day0dc3f442013-09-05 14:41:35 -04001397 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001398 return last;
1399}
1400
Jason Baronddb97f12012-08-02 15:44:16 -04001401static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1402{
1403 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001404
1405 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001406 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001407 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1408 if (ret) {
1409 perror("qemu_madvise");
1410 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1411 "but dump_guest_core=off specified\n");
1412 }
1413 }
1414}
1415
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001416const char *qemu_ram_get_idstr(RAMBlock *rb)
1417{
1418 return rb->idstr;
1419}
1420
Mike Dayae3a7042013-09-05 14:41:35 -04001421/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001422void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001423{
Gongleifa53a0e2016-05-10 10:04:59 +08001424 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001425
Avi Kivityc5705a72011-12-20 15:59:12 +02001426 assert(new_block);
1427 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001428
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001429 if (dev) {
1430 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001431 if (id) {
1432 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001433 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001434 }
1435 }
1436 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1437
Gongleiab0a9952016-05-10 10:05:00 +08001438 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001439 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001440 if (block != new_block &&
1441 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001442 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1443 new_block->idstr);
1444 abort();
1445 }
1446 }
Mike Day0dc3f442013-09-05 14:41:35 -04001447 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001448}
1449
Mike Dayae3a7042013-09-05 14:41:35 -04001450/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001451void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001452{
Mike Dayae3a7042013-09-05 14:41:35 -04001453 /* FIXME: arch_init.c assumes that this is not called throughout
1454 * migration. Ignore the problem since hot-unplug during migration
1455 * does not work anyway.
1456 */
Hu Tao20cfe882014-04-02 15:13:26 +08001457 if (block) {
1458 memset(block->idstr, 0, sizeof(block->idstr));
1459 }
1460}
1461
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001462size_t qemu_ram_pagesize(RAMBlock *rb)
1463{
1464 return rb->page_size;
1465}
1466
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001467static int memory_try_enable_merging(void *addr, size_t len)
1468{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001469 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001470 /* disabled by the user */
1471 return 0;
1472 }
1473
1474 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1475}
1476
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001477/* Only legal before the guest might have detected the memory size: e.g. on
1478 * incoming migration, or right after reset.
1479 *
1480 * As the memory core doesn't know how memory is accessed, it is up to the
1481 * resize callback to update device state and/or add assertions to detect
1482 * misuse, if necessary.
1483 */
Gongleifa53a0e2016-05-10 10:04:59 +08001484int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001485{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001486 assert(block);
1487
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001488 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001489
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001490 if (block->used_length == newsize) {
1491 return 0;
1492 }
1493
1494 if (!(block->flags & RAM_RESIZEABLE)) {
1495 error_setg_errno(errp, EINVAL,
1496 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1497 " in != 0x" RAM_ADDR_FMT, block->idstr,
1498 newsize, block->used_length);
1499 return -EINVAL;
1500 }
1501
1502 if (block->max_length < newsize) {
1503 error_setg_errno(errp, EINVAL,
1504 "Length too large: %s: 0x" RAM_ADDR_FMT
1505 " > 0x" RAM_ADDR_FMT, block->idstr,
1506 newsize, block->max_length);
1507 return -EINVAL;
1508 }
1509
1510 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1511 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001512 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1513 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001514 memory_region_set_size(block->mr, newsize);
1515 if (block->resized) {
1516 block->resized(block->idstr, newsize, block->host);
1517 }
1518 return 0;
1519}
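
/* Illustrative sketch (hypothetical names): resizing on incoming
 * migration once the source's size is known. The block must have been
 * created resizeable, e.g. with qemu_ram_alloc_resizeable() below:
 *
 *     Error *err = NULL;
 *
 *     if (qemu_ram_resize(block, remote_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 */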
1520
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001521/* Called with ram_list.mutex held */
1522static void dirty_memory_extend(ram_addr_t old_ram_size,
1523 ram_addr_t new_ram_size)
1524{
1525 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1526 DIRTY_MEMORY_BLOCK_SIZE);
1527 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1528 DIRTY_MEMORY_BLOCK_SIZE);
1529 int i;
1530
1531 /* Only need to extend if block count increased */
1532 if (new_num_blocks <= old_num_blocks) {
1533 return;
1534 }
1535
1536 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1537 DirtyMemoryBlocks *old_blocks;
1538 DirtyMemoryBlocks *new_blocks;
1539 int j;
1540
1541 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1542 new_blocks = g_malloc(sizeof(*new_blocks) +
1543 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1544
1545 if (old_num_blocks) {
1546 memcpy(new_blocks->blocks, old_blocks->blocks,
1547 old_num_blocks * sizeof(old_blocks->blocks[0]));
1548 }
1549
1550 for (j = old_num_blocks; j < new_num_blocks; j++) {
1551 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1552 }
1553
1554 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1555
1556 if (old_blocks) {
1557 g_free_rcu(old_blocks, rcu);
1558 }
1559 }
1560}
1561
Fam Zheng528f46a2016-03-01 14:18:18 +08001562static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001563{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001564 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001565 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001566 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001567 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001568
1569 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001570
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001571 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001572 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001573
1574 if (!new_block->host) {
1575 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001576 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001577 new_block->mr, &err);
1578 if (err) {
1579 error_propagate(errp, err);
1580 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001581 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001582 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001583 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001584 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001585 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001586 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001587 error_setg_errno(errp, errno,
1588 "cannot set up guest memory '%s'",
1589 memory_region_name(new_block->mr));
1590 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001591 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001592 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001593 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001594 }
1595 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001596
Li Zhijiandd631692015-07-02 20:18:06 +08001597 new_ram_size = MAX(old_ram_size,
1598 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1599 if (new_ram_size > old_ram_size) {
1600 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001601 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001602 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001603 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1604 * QLIST (which has an RCU-friendly variant) does not have insertion at
1605 * tail, so save the last element in last_block.
1606 */
Mike Day0dc3f442013-09-05 14:41:35 -04001607 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001608 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001609 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001610 break;
1611 }
1612 }
1613 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001614 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001615 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001616 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001617 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001618 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001619 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001620 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001621
Mike Day0dc3f442013-09-05 14:41:35 -04001622 /* Write list before version */
1623 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001624 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001625 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001626
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001627 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001628 new_block->used_length,
1629 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001630
Paolo Bonzinia904c912015-01-21 16:18:35 +01001631 if (new_block->host) {
1632 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1633 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
Cao jinc2cd6272016-09-12 14:34:56 +08001634 /* MADV_DONTFORK is also needed by KVM in the absence of synchronous MMU */
Paolo Bonzinia904c912015-01-21 16:18:35 +01001635 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001637}
1638
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001639#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001640RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1641 bool share, const char *mem_path,
1642 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001643{
1644 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001645 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001646
1647 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001648 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001649 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001650 }
1651
1652 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1653 /*
1654 * file_ram_alloc() needs to allocate just like
1655 * phys_mem_alloc, but we haven't bothered to provide
1656 * a hook there.
1657 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001658 error_setg(errp,
1659 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001660 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001661 }
1662
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001663 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001664 new_block = g_malloc0(sizeof(*new_block));
1665 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001666 new_block->used_length = size;
1667 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001668 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001669 new_block->host = file_ram_alloc(new_block, size,
1670 mem_path, errp);
1671 if (!new_block->host) {
1672 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001673 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001674 }
1675
Fam Zheng528f46a2016-03-01 14:18:18 +08001676 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001677 if (local_err) {
1678 g_free(new_block);
1679 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001680 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001681 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001682 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001683}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001684#endif
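
/* Illustrative sketch (hypothetical path and size): backing a region with
 * a hugetlbfs file, much as -mem-path does:
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file((ram_addr_t)1 << 30, mr,
 *                                             false, "/dev/hugepages",
 *                                             &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 */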
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001685
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001686static
Fam Zheng528f46a2016-03-01 14:18:18 +08001687RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1688 void (*resized)(const char*,
1689 uint64_t length,
1690 void *host),
1691 void *host, bool resizeable,
1692 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001693{
1694 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001695 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001696
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001697 size = HOST_PAGE_ALIGN(size);
1698 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001699 new_block = g_malloc0(sizeof(*new_block));
1700 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001701 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001702 new_block->used_length = size;
1703 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001704 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001705 new_block->fd = -1;
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001706 new_block->page_size = getpagesize();
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001707 new_block->host = host;
1708 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001709 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001710 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001711 if (resizeable) {
1712 new_block->flags |= RAM_RESIZEABLE;
1713 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001714 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001715 if (local_err) {
1716 g_free(new_block);
1717 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001718 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001719 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001720 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001721}
1722
Fam Zheng528f46a2016-03-01 14:18:18 +08001723RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001724 MemoryRegion *mr, Error **errp)
1725{
1726 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1727}
1728
Fam Zheng528f46a2016-03-01 14:18:18 +08001729RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001730{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001731 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1732}
1733
Fam Zheng528f46a2016-03-01 14:18:18 +08001734RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001735 void (*resized)(const char*,
1736 uint64_t length,
1737 void *host),
1738 MemoryRegion *mr, Error **errp)
1739{
1740 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001741}
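
/* Illustrative sketch (hypothetical names): wiring up a resizeable block.
 * The resized callback runs from qemu_ram_resize() after used_length has
 * been updated, so it can refresh device state derived from the size:
 *
 *     static void example_ram_resized(const char *id, uint64_t length,
 *                                     void *host)
 *     {
 *         // e.g. update a device register that reports the RAM size
 *     }
 *
 *     block = qemu_ram_alloc_resizeable(initial_size, max_size,
 *                                       example_ram_resized, mr, &err);
 */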
bellarde9a1ab12007-02-08 23:08:38 +00001742
Paolo Bonzini43771532013-09-09 17:58:40 +02001743static void reclaim_ramblock(RAMBlock *block)
1744{
1745 if (block->flags & RAM_PREALLOC) {
1746 ;
1747 } else if (xen_enabled()) {
1748 xen_invalidate_map_cache_entry(block->host);
1749#ifndef _WIN32
1750 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001751 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001752 close(block->fd);
1753#endif
1754 } else {
1755 qemu_anon_ram_free(block->host, block->max_length);
1756 }
1757 g_free(block);
1758}
1759
Fam Zhengf1060c52016-03-01 14:18:22 +08001760void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001761{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001762 if (!block) {
1763 return;
1764 }
1765
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001766 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001767 QLIST_REMOVE_RCU(block, next);
1768 ram_list.mru_block = NULL;
1769 /* Write list before version */
1770 smp_wmb();
1771 ram_list.version++;
1772 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001773 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001774}
1775
Huang Yingcd19cfa2011-03-02 08:56:19 +01001776#ifndef _WIN32
1777void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1778{
1779 RAMBlock *block;
1780 ram_addr_t offset;
1781 int flags;
1782 void *area, *vaddr;
1783
Mike Day0dc3f442013-09-05 14:41:35 -04001784 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001785 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001786 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001787 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001788 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001789 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001790 } else if (xen_enabled()) {
1791 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001792 } else {
1793 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001794 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001795 flags |= (block->flags & RAM_SHARED ?
1796 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001797 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1798 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001799 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001800 /*
1801 * Remap needs to match alloc. Accelerators that
1802 * set phys_mem_alloc never remap. If they did,
1803 * we'd need a remap hook here.
1804 */
1805 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1806
Huang Yingcd19cfa2011-03-02 08:56:19 +01001807 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1808 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1809 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001810 }
1811 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001812 fprintf(stderr, "Could not remap addr: "
1813 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001814 length, addr);
1815 exit(1);
1816 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001817 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001818 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001819 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001820 }
1821 }
1822}
1823#endif /* !_WIN32 */
1824
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001825/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001826 * This should not be used for general purpose DMA. Use address_space_map
1827 * or address_space_rw instead. For local memory (e.g. video ram) that the
1828 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001829 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001830 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001831 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001832void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001833{
Gonglei3655cb92016-02-20 10:35:20 +08001834 RAMBlock *block = ram_block;
1835
1836 if (block == NULL) {
1837 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001838 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001839 }
Mike Dayae3a7042013-09-05 14:41:35 -04001840
1841 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001842 /* We need to check if the requested address is in the RAM
1843 * because we don't want to map the entire memory in QEMU.
1844 * In that case just map until the end of the page.
1845 */
1846 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001847 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001848 }
Mike Dayae3a7042013-09-05 14:41:35 -04001849
1850 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001851 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001852 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001853}
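
/* Illustrative sketch: as the comment above says, device DMA should go
 * through the address space API instead, so MMIO, watchpoints and dirty
 * tracking all behave correctly (gpa is a hypothetical guest physical
 * address):
 *
 *     uint8_t buf[64];
 *
 *     address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
 *                      buf, sizeof(buf), false);
 */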
1854
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001855/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001856 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001857 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001858 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001859 */
Gonglei3655cb92016-02-20 10:35:20 +08001860static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1861 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001862{
Gonglei3655cb92016-02-20 10:35:20 +08001863 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001864 if (*size == 0) {
1865 return NULL;
1866 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001867
Gonglei3655cb92016-02-20 10:35:20 +08001868 if (block == NULL) {
1869 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001870 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001871 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001872 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001873
1874 if (xen_enabled() && block->host == NULL) {
1875 /* We need to check if the requested address is in the RAM
1876 * because we don't want to map the entire memory in QEMU.
1877 * In that case just map the requested area.
1878 */
1879 if (block->offset == 0) {
1880 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001881 }
1882
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001883 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001884 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001885
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001886 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001887}
1888
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001889/*
1890 * Translates a host ptr back to a RAMBlock and an offset
1891 * in that RAMBlock.
1892 *
1893 * ptr: Host pointer to look up
1894 * round_offset: If true round the result offset down to a page boundary
1896 * *offset: set to result offset within the RAMBlock
1897 *
1898 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001899 *
1900 * By the time this function returns, the returned pointer is not protected
1901 * by RCU anymore. If the caller is not within an RCU critical section and
1902 * does not hold the iothread lock, it must have other means of protecting the
1903 * pointer, such as a reference to the region that includes the incoming
1904 * ram_addr_t.
1905 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001906RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001907 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001908{
pbrook94a6b542009-04-11 17:15:54 +00001909 RAMBlock *block;
1910 uint8_t *host = ptr;
1911
Jan Kiszka868bb332011-06-21 22:59:09 +02001912 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001913 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001914 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001915 ram_addr = xen_ram_addr_from_mapcache(ptr);
1916 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001917 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001918 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001919 }
Mike Day0dc3f442013-09-05 14:41:35 -04001920 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001921 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001922 }
1923
Mike Day0dc3f442013-09-05 14:41:35 -04001924 rcu_read_lock();
1925 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001926 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001927 goto found;
1928 }
1929
Mike Day0dc3f442013-09-05 14:41:35 -04001930 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001931 /* This case happens when the block is not mapped. */
1932 if (block->host == NULL) {
1933 continue;
1934 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001935 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001936 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001937 }
pbrook94a6b542009-04-11 17:15:54 +00001938 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001939
Mike Day0dc3f442013-09-05 14:41:35 -04001940 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001941 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001942
1943found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001944 *offset = (host - block->host);
1945 if (round_offset) {
1946 *offset &= TARGET_PAGE_MASK;
1947 }
Mike Day0dc3f442013-09-05 14:41:35 -04001948 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001949 return block;
1950}
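
/* Illustrative sketch: mapping a host address (e.g. from a fault handler)
 * back to guest RAM. host_ptr is hypothetical; per the comment above, the
 * caller must hold its own reference (RCU, iothread lock, or a memory
 * region reference):
 *
 *     ram_addr_t offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, false, &offset);
 *
 *     if (rb) {
 *         // rb->offset + offset is the ram_addr_t behind host_ptr
 *     }
 */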
1951
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001952/*
1953 * Finds the named RAMBlock
1954 *
1955 * name: The name of RAMBlock to find
1956 *
1957 * Returns: RAMBlock (or NULL if not found)
1958 */
1959RAMBlock *qemu_ram_block_by_name(const char *name)
1960{
1961 RAMBlock *block;
1962
1963 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1964 if (!strcmp(name, block->idstr)) {
1965 return block;
1966 }
1967 }
1968
1969 return NULL;
1970}
1971
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001972/* Some of the softmmu routines need to translate from a host pointer
1973 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001974ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001975{
1976 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001977 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001978
Paolo Bonzinif615f392016-05-26 10:07:50 +02001979 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001980 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001981 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001982 }
1983
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001984 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001985}
Alex Williamsonf471a172010-06-11 11:11:42 -06001986
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001987/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001988static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001989 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001990{
Juan Quintela52159192013-10-08 12:44:04 +02001991 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001992 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001993 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001994 switch (size) {
1995 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001996 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001997 break;
1998 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001999 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002000 break;
2001 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002002 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002003 break;
2004 default:
2005 abort();
2006 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002007 /* Set both VGA and migration bits for simplicity and to remove
2008 * the notdirty callback faster.
2009 */
2010 cpu_physical_memory_set_dirty_range(ram_addr, size,
2011 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002012 /* we remove the notdirty callback only if the code has been
2013 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002014 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002015 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002016 }
bellard1ccde1c2004-02-06 19:46:14 +00002017}
2018
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002019static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2020 unsigned size, bool is_write)
2021{
2022 return is_write;
2023}
2024
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002025static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002026 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002027 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002028 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002029};
2030
pbrook0f459d12008-06-09 00:20:13 +00002031/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002032static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002033{
Andreas Färber93afead2013-08-26 03:41:01 +02002034 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002035 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002036 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002037 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002038 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002039 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002040 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002041
Andreas Färberff4700b2013-08-26 18:23:18 +02002042 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002043 /* We re-entered the check after replacing the TB. Now raise
2044 * the debug interrupt so that it will trigger after the
2045 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002046 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002047 return;
2048 }
Andreas Färber93afead2013-08-26 03:41:01 +02002049 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002050 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002051 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2052 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002053 if (flags == BP_MEM_READ) {
2054 wp->flags |= BP_WATCHPOINT_HIT_READ;
2055 } else {
2056 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2057 }
2058 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002059 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002060 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002061 if (wp->flags & BP_CPU &&
2062 !cc->debug_check_watchpoint(cpu, wp)) {
2063 wp->flags &= ~BP_WATCHPOINT_HIT;
2064 continue;
2065 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002066 cpu->watchpoint_hit = wp;
KONRAD Frederica5e99822016-10-27 16:10:06 +01002067
2068 /* The tb_lock will be reset when cpu_loop_exit or
2069 * cpu_loop_exit_noexc longjmp back into the cpu_exec
2070 * main loop.
2071 */
2072 tb_lock();
Andreas Färber239c51a2013-09-01 17:12:23 +02002073 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002074 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002075 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002076 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002077 } else {
2078 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002079 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002080 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002081 }
aliguori06d55cc2008-11-18 20:24:06 +00002082 }
aliguori6e140f22008-11-18 20:37:55 +00002083 } else {
2084 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002085 }
2086 }
2087}
2088
pbrook6658ffb2007-03-16 23:58:11 +00002089/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2090 so these check for a hit then pass through to the normal out-of-line
2091 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002092static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2093 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002094{
Peter Maydell66b9b432015-04-26 16:49:24 +01002095 MemTxResult res;
2096 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002097 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2098 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002099
Peter Maydell66b9b432015-04-26 16:49:24 +01002100 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002101 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002102 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002103 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002104 break;
2105 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002106 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002107 break;
2108 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002109 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002110 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002111 default: abort();
2112 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002113 *pdata = data;
2114 return res;
2115}
2116
2117static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2118 uint64_t val, unsigned size,
2119 MemTxAttrs attrs)
2120{
2121 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002122 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2123 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002124
2125 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2126 switch (size) {
2127 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002128 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002129 break;
2130 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002131 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002132 break;
2133 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002134 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002135 break;
2136 default: abort();
2137 }
2138 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002139}
2140
Avi Kivity1ec9b902012-01-02 12:47:48 +02002141static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002142 .read_with_attrs = watch_mem_read,
2143 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002144 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002145};
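
/* Illustrative sketch: these routines fire only after a watchpoint has
 * been armed, e.g. by the gdbstub with something like:
 *
 *     CPUWatchpoint *wp;
 *
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * after which the TLB routes accesses to that page through
 * watch_mem_read()/watch_mem_write() above.
 */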
pbrook6658ffb2007-03-16 23:58:11 +00002146
Peter Maydellf25a49e2015-04-26 16:49:24 +01002147static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2148 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002149{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002150 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002151 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002152 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002153
blueswir1db7b5422007-05-26 17:36:03 +00002154#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002155 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002156 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002157#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002158 res = address_space_read(subpage->as, addr + subpage->base,
2159 attrs, buf, len);
2160 if (res) {
2161 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002162 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002163 switch (len) {
2164 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002165 *data = ldub_p(buf);
2166 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002167 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002168 *data = lduw_p(buf);
2169 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002170 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002171 *data = ldl_p(buf);
2172 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002173 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002174 *data = ldq_p(buf);
2175 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002176 default:
2177 abort();
2178 }
blueswir1db7b5422007-05-26 17:36:03 +00002179}
2180
Peter Maydellf25a49e2015-04-26 16:49:24 +01002181static MemTxResult subpage_write(void *opaque, hwaddr addr,
2182 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002183{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002184 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002185 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002186
blueswir1db7b5422007-05-26 17:36:03 +00002187#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002188 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002189 " value %"PRIx64"\n",
2190 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002191#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002192 switch (len) {
2193 case 1:
2194 stb_p(buf, value);
2195 break;
2196 case 2:
2197 stw_p(buf, value);
2198 break;
2199 case 4:
2200 stl_p(buf, value);
2201 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002202 case 8:
2203 stq_p(buf, value);
2204 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002205 default:
2206 abort();
2207 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002208 return address_space_write(subpage->as, addr + subpage->base,
2209 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002210}
2211
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002212static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002213 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002214{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002215 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002216#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002217 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002218 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002219#endif
2220
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002221 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002222 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002223}
2224
Avi Kivity70c68e42012-01-02 12:32:48 +02002225static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002226 .read_with_attrs = subpage_read,
2227 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002228 .impl.min_access_size = 1,
2229 .impl.max_access_size = 8,
2230 .valid.min_access_size = 1,
2231 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002232 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002233 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002234};
2235
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

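/* Translate an iotlb value (as stored in a CPU TLB entry) back to the
 * MemoryRegion it refers to, via the CPU's current dispatch table. */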
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL,
                          UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

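/* Listener callbacks for AddressSpaceDispatch rebuilds: mem_begin starts
 * a fresh dispatch table, the region_add/region_nop hooks populate it,
 * and mem_commit publishes it with RCU (see address_space_init_dispatch). */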
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

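/* Commit hook of a TCG CPU's listener: resynchronize the CPU's cached
 * dispatch pointer and drop stale TLB entries after the memory map has
 * changed. */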
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

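/* Mark a completed write to guest RAM in the dirty bitmaps, invalidating
 * any translated code that covered the range first. */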
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

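/* Clamp an access of size @l at @addr to what the region supports.
 * Worked example (illustrative values, not from this file): for a region
 * with valid.max_access_size == 8 and impl.unaligned == false, an 8-byte
 * access at addr 0x1006 has addr & -addr == 2, so it is clamped to a
 * 2-byte access and the caller's loop issues the remainder separately. */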
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

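/* Take the iothread lock if this MMIO region requires it and the caller
 * does not already hold it; flush coalesced MMIO before dispatching.
 * Returns true if the caller must release the lock afterwards. */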
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

/* Called within RCU critical section. */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

/* Called within RCU critical section. */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

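/* Legacy helper: system-memory r/w with unspecified transaction
 * attributes.  A minimal illustrative use (hypothetical buffer and
 * guest address, not from this file):
 *
 *     uint8_t data[4];
 *     cpu_physical_memory_rw(0x1000, data, sizeof(data), 0);   // read
 *     cpu_physical_memory_rw(0x1000, data, sizeof(data), 1);   // write
 */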
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

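/* A single global bounce buffer, used when address_space_map() targets
 * MMIO; in_use arbitrates access to it and map clients queue behind it. */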
static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

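/* Check that every byte of [addr, addr + len) can be accessed for the
 * given direction without faulting; MMIO regions are additionally
 * checked against their declared valid access sizes. */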
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

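/* Typical zero-copy access pattern (illustrative sketch; 'as', 'addr'
 * and the length are hypothetical caller state, not from this file):
 *
 *     hwaddr plen = 4096;
 *     void *p = address_space_map(as, addr, &plen, true);
 *     if (p) {
 *         memset(p, 0, plen);             // touch at most plen bytes
 *         address_space_unmap(as, p, plen, true, plen);
 *     }                                   // else retry after the bounce
 *                                         // buffer frees up, via
 *                                         // cpu_register_map_client()
 */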
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}

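/* Fixed-width load helpers.  Each address_space_ld*_internal() below
 * dispatches to MMIO or reads RAM directly, byte-swapping as needed for
 * the requested endianness; the ld*_phys() wrappers use unspecified
 * transaction attributes and ignore the MemTxResult. */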
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned.  The RAM page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

bellard8df1cd02005-01-28 22:37:22 +00003372/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003373static inline void address_space_stl_internal(AddressSpace *as,
3374 hwaddr addr, uint32_t val,
3375 MemTxAttrs attrs,
3376 MemTxResult *result,
3377 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003378{
bellard8df1cd02005-01-28 22:37:22 +00003379 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003380 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003381 hwaddr l = 4;
3382 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003383 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003384 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003385
Paolo Bonzini41063e12015-03-18 14:21:43 +01003386 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003387 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003388 true);
3389 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003390 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003391
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003392#if defined(TARGET_WORDS_BIGENDIAN)
3393 if (endian == DEVICE_LITTLE_ENDIAN) {
3394 val = bswap32(val);
3395 }
3396#else
3397 if (endian == DEVICE_BIG_ENDIAN) {
3398 val = bswap32(val);
3399 }
3400#endif
Peter Maydell50013112015-04-26 16:49:24 +01003401 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003402 } else {
bellard8df1cd02005-01-28 22:37:22 +00003403 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003404 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003405 switch (endian) {
3406 case DEVICE_LITTLE_ENDIAN:
3407 stl_le_p(ptr, val);
3408 break;
3409 case DEVICE_BIG_ENDIAN:
3410 stl_be_p(ptr, val);
3411 break;
3412 default:
3413 stl_p(ptr, val);
3414 break;
3415 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003416 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003417 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003418 }
Peter Maydell50013112015-04-26 16:49:24 +01003419 if (result) {
3420 *result = r;
3421 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003422 if (release_lock) {
3423 qemu_mutex_unlock_iothread();
3424 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003425 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003426}
3427
3428void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3429 MemTxAttrs attrs, MemTxResult *result)
3430{
3431 address_space_stl_internal(as, addr, val, attrs, result,
3432 DEVICE_NATIVE_ENDIAN);
3433}
3434
3435void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3436 MemTxAttrs attrs, MemTxResult *result)
3437{
3438 address_space_stl_internal(as, addr, val, attrs, result,
3439 DEVICE_LITTLE_ENDIAN);
3440}
3441
3442void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3443 MemTxAttrs attrs, MemTxResult *result)
3444{
3445 address_space_stl_internal(as, addr, val, attrs, result,
3446 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003447}
3448
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003449void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003450{
Peter Maydell50013112015-04-26 16:49:24 +01003451 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003452}
3453
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003454void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003455{
Peter Maydell50013112015-04-26 16:49:24 +01003456 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003457}
3458
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003459void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003460{
Peter Maydell50013112015-04-26 16:49:24 +01003461 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003462}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
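
/*
 * Worked example (illustrative, not from the original file): unlike the
 * 16/32-bit helpers above, the 64-bit stores byte-swap in place and then
 * bounce through address_space_rw() -- hence the "XXX: optimize" note,
 * as there is no direct RAM fast path.  Whatever the host endianness,
 * the sketch below leaves the bytes 88 77 66 55 44 33 22 11 at
 * guest-physical "addr"; the readback sanity check is an assumption of
 * the example.
 */
static bool example_stq_le_layout(AddressSpace *as, hwaddr addr)
{
    uint8_t bytes[8];

    stq_le_phys(as, addr, 0x1122334455667788ULL);
    /* Read the eight bytes back (is_write == 0) and check the layout. */
    address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, bytes, 8, 0);
    return bytes[0] == 0x88 && bytes[7] == 0x11;
}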

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
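
/*
 * Usage sketch (assumed caller, not part of this file): this is the kind
 * of primitive a gdb stub uses to peek at guest virtual memory.  A helper
 * that copies out a NUL-terminated guest string, stopping cleanly at
 * unmapped pages, might look like this; the name is made up.
 */
static int example_read_guest_string(CPUState *cpu, target_ulong addr,
                                     char *out, size_t max_len)
{
    size_t i;

    for (i = 0; i + 1 < max_len; i++) {
        uint8_t byte;

        /* is_write == 0: read one byte at the guest virtual address */
        if (cpu_memory_rw_debug(cpu, addr + i, &byte, 1, 0) < 0) {
            return -1;  /* no physical page mapped at this address */
        }
        out[i] = (char)byte;
        if (byte == 0) {
            return 0;
        }
    }
    out[i] = '\0';
    return 0;
}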

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
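
/*
 * Example (illustrative): target-independent code, e.g. migration, can
 * derive the page size and page mask from the accessor above without
 * ever seeing TARGET_PAGE_BITS itself.
 */
static inline uint64_t example_target_page_size(void)
{
    return 1ULL << qemu_target_page_bits();
}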

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
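
/*
 * Usage sketch (hypothetical caller): guest-memory dumping code uses a
 * predicate like the one above to skip device (MMIO) pages, since reading
 * them can have side effects.  A simplified page-granular scan:
 */
static int example_count_io_pages(hwaddr start, hwaddr len)
{
    hwaddr gpa;
    int n = 0;

    for (gpa = start & TARGET_PAGE_MASK; gpa < start + len;
         gpa += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(gpa)) {
            n++;
        }
    }
    return n;
}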

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
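
/*
 * Usage sketch (hypothetical consumer, not from this file):
 * qemu_ram_foreach_block() walks the RAM block list under the RCU read
 * lock and stops at the first callback that returns non-zero.  A caller
 * that only wants the total of all used block lengths could pass a
 * callback with the parameter list matching the call site above:
 */
static int example_sum_block_sizes(const char *block_name, void *host_addr,
                                   ram_addr_t offset, ram_addr_t length,
                                   void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;   /* zero means: keep iterating */
}
/*
 * ... invoked as:
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_sum_block_sizes, &total);
 */
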
#endif