/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/misc/mmio_interface.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
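
/*
 * Illustrative example (added commentary, not in the original): for
 * r1 = [0x1000, 0x1000 + 0x1000) and r2 = [0x1800, 0x1800 + 0x1000),
 * addrrange_intersects() is true and addrrange_intersection() yields
 * [0x1800, 0x1800 + 0x800): start is the larger of the two starts, and
 * size is the smaller of the two ends minus that start.
 */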

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr, as);      \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)
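
/*
 * Added note (an inference from how the macros are used below, not a
 * comment from the original): listeners are walked Forward when state
 * comes into existence (region_add, log_start) and Reverse when it is
 * torn down (region_del, log_stop), so tear-down callbacks run in the
 * opposite order to the corresponding set-up callbacks.
 */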

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
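
/*
 * Added note (an assumption based on how these helpers are used below):
 * memory_region_ioeventfd_before() defines a strict lexicographic order
 * over (start, size, match_data, data, e).
 * address_space_add_del_ioeventfds() relies on both fd arrays being
 * sorted by this order so it can compute their symmetric difference in
 * a single linear pass.
 */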

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

typedef struct AddressSpaceOps AddressSpaceOps;

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, AddressSpace *as)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .address_space = as,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

static void flatview_init(FlatView *view)
{
    view->ref = 1;
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}
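
/*
 * Added commentary: flatview_ref() deliberately fails once the count
 * has already dropped to zero, because a reader running under RCU may
 * race with a writer that just replaced as->current_map and dropped the
 * last reference.  address_space_get_flatview() below loops until it
 * takes a reference on a still-live view.
 */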

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
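
/*
 * Worked example (added for illustration): if rendering produced two
 * back-to-back ranges of the same RAM region, say [0x0, +0x1000) at
 * region offset 0 and [0x1000, +0x1000) at region offset 0x1000,
 * can_merge() accepts them (contiguous in both guest address space and
 * region offset, identical attributes) and flatview_simplify() folds
 * them into a single [0x0, +0x2000) range.
 */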

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
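
/*
 * Added example (commentary only): a DEVICE_LITTLE_ENDIAN region on a
 * big-endian target has the "wrong" endianness, so a 4-byte value read
 * from it is bswap32()ed before being handed to the target.  Accesses
 * of size 1 never need swapping, and DEVICE_NATIVE_ENDIAN regions are
 * never swapped on either kind of target.
 */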

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}
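
/*
 * Worked example (added commentary): a 64-bit access to a device whose
 * MemoryRegionOps declare .impl.max_access_size = 4 is split into two
 * 4-byte accesses with access_mask = 0xffffffff.  On a little-endian
 * device the two pieces land at shifts 0 and 32; when
 * memory_region_big_endian() is true the shifts are reversed so the
 * first (lowest-addressed) piece fills the most significant half.
 */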

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
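
/*
 * Illustrative example (added, not in the original): because subregions
 * are kept in priority order and rendered before their parent
 * terminates, a higher-priority subregion covering [0x1000, +0x1000)
 * "punches a hole" in a lower-priority sibling covering [0x0, +0x10000):
 * the sibling is rendered only into the remaining gaps, producing flat
 * ranges for [0x0, +0x1000) and [0x2000, +0xe000) around the hole.
 */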

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    flatview_simplify(view);

    return view;
}

static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = atomic_rcu_read(&as->current_map);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
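
/*
 * Added commentary: the caller below runs this as two passes over the
 * same pair of views, first with adding == false (region_del for
 * everything that disappeared or changed) and then with adding == true
 * (region_add for everything new), so listeners never see an address
 * range claimed by two flat ranges at once.
 */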


static void address_space_update_topology(AddressSpace *as)
{
    FlatView *old_view = address_space_get_flatview(as);
    FlatView *new_view = generate_memory_topology(as->root);

    address_space_update_topology_pass(as, old_view, new_view, false);
    address_space_update_topology_pass(as, old_view, new_view, true);

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    call_rcu(old_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(old_view);

    address_space_update_ioeventfds(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
            memory_region_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
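
/*
 * Usage sketch (added illustration; "mr" and "new_addr" are placeholder
 * names, not from this file).  Transactions nest, and the flat views
 * are only rebuilt when the outermost commit brings the depth back to
 * zero:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr, false);
 *     memory_region_set_address(mr, new_addr);
 *     memory_region_set_enabled(mr, true);
 *     memory_region_transaction_commit();
 *
 * Batching the three updates this way triggers a single topology update
 * instead of three.
 */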

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
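
/*
 * Added example: the characters '/', '[', '\' and ']' are meaningful in
 * QOM paths, so they are escaped as "\xNN" hex sequences.  A region
 * named "pci[0]" becomes "pci\x5b0\x5d", and is then registered under
 * the child property name "pci\x5b0\x5d[*]" once the "[*]"
 * auto-numbering suffix is appended below.
 */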

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
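
/*
 * Added note: ram_device regions (e.g. device memory mapped from VFIO)
 * are backed by a host pointer, so the accessors above dereference
 * mr->ram_block->host directly.  Declaring DEVICE_HOST_ENDIAN makes the
 * memory core byte-swap exactly when host and target endianness differ,
 * which is the right behaviour for data that is already in host byte
 * order.
 */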
1214
Paolo Bonzinid2702032013-05-24 11:55:06 +02001215bool memory_region_access_valid(MemoryRegion *mr,
1216 hwaddr addr,
1217 unsigned size,
1218 bool is_write)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001219{
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001220 int access_size_min, access_size_max;
1221 int access_size, i;
Avi Kivity897fa7c2011-11-13 13:05:27 +02001222
Avi Kivity093bc2c2011-07-26 14:26:01 +03001223 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1224 return false;
1225 }
1226
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001227 if (!mr->ops->valid.accepts) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001228 return true;
1229 }
1230
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001231 access_size_min = mr->ops->valid.min_access_size;
1232 if (!mr->ops->valid.min_access_size) {
1233 access_size_min = 1;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001234 }
Paolo Bonzinia014ed02013-05-24 17:48:52 +02001235
1236 access_size_max = mr->ops->valid.max_access_size;
1237 if (!mr->ops->valid.max_access_size) {
1238 access_size_max = 4;
1239 }
1240
1241 access_size = MAX(MIN(size, access_size_max), access_size_min);
1242 for (i = 0; i < size; i += access_size) {
1243 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
1244 is_write)) {
1245 return false;
1246 }
1247 }
1248
Avi Kivity093bc2c2011-07-26 14:26:01 +03001249 return true;
1250}
1251
Peter Maydellcc05c432015-04-26 16:49:23 +01001252static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1253 hwaddr addr,
1254 uint64_t *pval,
1255 unsigned size,
1256 MemTxAttrs attrs)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001257{
Peter Maydellcc05c432015-04-26 16:49:23 +01001258 *pval = 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001259
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001260 if (mr->ops->read) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001261 return access_with_adjusted_size(addr, pval, size,
1262 mr->ops->impl.min_access_size,
1263 mr->ops->impl.max_access_size,
1264 memory_region_read_accessor,
1265 mr, attrs);
1266 } else if (mr->ops->read_with_attrs) {
1267 return access_with_adjusted_size(addr, pval, size,
1268 mr->ops->impl.min_access_size,
1269 mr->ops->impl.max_access_size,
1270 memory_region_read_with_attrs_accessor,
1271 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001272 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001273 return access_with_adjusted_size(addr, pval, size, 1, 4,
1274 memory_region_oldmmio_read_accessor,
1275 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001276 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001277}
1278
Peter Maydell3b643492015-04-26 16:49:23 +01001279MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1280 hwaddr addr,
1281 uint64_t *pval,
1282 unsigned size,
1283 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001284{
Peter Maydellcc05c432015-04-26 16:49:23 +01001285 MemTxResult r;
1286
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001287 if (!memory_region_access_valid(mr, addr, size, false)) {
1288 *pval = unassigned_mem_read(mr, addr, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001289 return MEMTX_DECODE_ERROR;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001290 }
Avi Kivitya621f382012-01-02 13:12:08 +02001291
Peter Maydellcc05c432015-04-26 16:49:23 +01001292 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001293 adjust_endianness(mr, pval, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001294 return r;
Avi Kivitya621f382012-01-02 13:12:08 +02001295}
1296
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001297/* Return true if an eventfd was signalled */
1298static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1299 hwaddr addr,
1300 uint64_t data,
1301 unsigned size,
1302 MemTxAttrs attrs)
1303{
1304 MemoryRegionIoeventfd ioeventfd = {
1305 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1306 .data = data,
1307 };
1308 unsigned i;
1309
1310 for (i = 0; i < mr->ioeventfd_nb; i++) {
1311 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1312 ioeventfd.e = mr->ioeventfds[i].e;
1313
1314 if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) {
1315 event_notifier_set(ioeventfd.e);
1316 return true;
1317 }
1318 }
1319
1320 return false;
1321}
1322
Peter Maydell3b643492015-04-26 16:49:23 +01001323MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1324 hwaddr addr,
1325 uint64_t data,
1326 unsigned size,
1327 MemTxAttrs attrs)
Avi Kivitya621f382012-01-02 13:12:08 +02001328{
Avi Kivity897fa7c2011-11-13 13:05:27 +02001329 if (!memory_region_access_valid(mr, addr, size, true)) {
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001330 unassigned_mem_write(mr, addr, data, size);
Peter Maydellcc05c432015-04-26 16:49:23 +01001331 return MEMTX_DECODE_ERROR;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001332 }
1333
Avi Kivitya621f382012-01-02 13:12:08 +02001334 adjust_endianness(mr, &data, size);
1335
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03001336 if ((!kvm_eventfds_enabled()) &&
1337 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1338 return MEMTX_OK;
1339 }
1340
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001341 if (mr->ops->write) {
Peter Maydellcc05c432015-04-26 16:49:23 +01001342 return access_with_adjusted_size(addr, &data, size,
1343 mr->ops->impl.min_access_size,
1344 mr->ops->impl.max_access_size,
1345 memory_region_write_accessor, mr,
1346 attrs);
1347 } else if (mr->ops->write_with_attrs) {
1348 return
1349 access_with_adjusted_size(addr, &data, size,
1350 mr->ops->impl.min_access_size,
1351 mr->ops->impl.max_access_size,
1352 memory_region_write_with_attrs_accessor,
1353 mr, attrs);
Paolo Bonzinice5d2f32013-05-24 17:45:48 +02001354 } else {
Peter Maydellcc05c432015-04-26 16:49:23 +01001355 return access_with_adjusted_size(addr, &data, size, 1, 4,
1356 memory_region_oldmmio_write_accessor,
1357 mr, attrs);
Avi Kivity74901c32011-07-26 14:26:10 +03001358 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001359}
1360
Avi Kivity093bc2c2011-07-26 14:26:01 +03001361void memory_region_init_io(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001362 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001363 const MemoryRegionOps *ops,
1364 void *opaque,
1365 const char *name,
1366 uint64_t size)
1367{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001368 memory_region_init(mr, owner, name, size);
Pavel Fedin6d6d2ab2015-08-13 11:26:21 +01001369 mr->ops = ops ? ops : &unassigned_mem_ops;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001370 mr->opaque = opaque;
Avi Kivity14a3c102011-07-26 14:26:06 +03001371 mr->terminates = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001372}
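
/* Illustrative sketch (not upstream code): a minimal MMIO device wired up
 * with memory_region_init_io(). The register layout, the "demo-mmio" name
 * and the 0x100 size are invented for the example; everything else uses
 * the APIs defined in this file.
 */
static uint64_t demo_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    uint32_t *regs = opaque;

    return regs[addr >> 2];            /* one 32-bit register per word */
}

static void demo_mmio_write(void *opaque, hwaddr addr,
                            uint64_t data, unsigned size)
{
    uint32_t *regs = opaque;

    regs[addr >> 2] = data;
}

static const MemoryRegionOps demo_mmio_ops = {
    .read = demo_mmio_read,
    .write = demo_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 4,         /* the accessors above assume words */
    .impl.max_access_size = 4,
};

static void demo_mmio_setup(MemoryRegion *mr, Object *owner, uint32_t *regs)
{
    memory_region_init_io(mr, owner, &demo_mmio_ops, regs, "demo-mmio",
                          0x100);
}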
1373
Peter Maydell1cfe48c2017-07-07 15:42:49 +01001374void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1375 Object *owner,
1376 const char *name,
1377 uint64_t size,
1378 Error **errp)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001379{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001380 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001381 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001382 mr->terminates = true;
Avi Kivity545e92e2011-08-08 19:58:48 +03001383 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001384 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001385 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001386}
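
/* Sketch: the _nomigrate initializer leaves migration to the caller, so
 * board code of this era typically pairs it with an explicit vmstate
 * registration before mapping the RAM. The "board.ram" name and the
 * address 0 are illustrative.
 */
static void demo_board_ram(MemoryRegion *ram, uint64_t ram_size)
{
    memory_region_init_ram_nomigrate(ram, NULL, "board.ram", ram_size,
                                     &error_fatal);
    vmstate_register_ram_global(ram);     /* from migration/vmstate.h */
    memory_region_add_subregion(get_system_memory(), 0, ram);
}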
1387
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001388void memory_region_init_resizeable_ram(MemoryRegion *mr,
1389 Object *owner,
1390 const char *name,
1391 uint64_t size,
1392 uint64_t max_size,
1393 void (*resized)(const char*,
1394 uint64_t length,
1395 void *host),
1396 Error **errp)
1397{
1398 memory_region_init(mr, owner, name, size);
1399 mr->ram = true;
1400 mr->terminates = true;
1401 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001402 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1403 mr, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001404 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Michael S. Tsirkin60786ef2014-11-17 00:24:36 +02001405}
1406
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001407#ifdef __linux__
1408void memory_region_init_ram_from_file(MemoryRegion *mr,
1409 struct Object *owner,
1410 const char *name,
1411 uint64_t size,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001412 bool share,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001413 const char *path,
1414 Error **errp)
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001415{
1416 memory_region_init(mr, owner, name, size);
1417 mr->ram = true;
1418 mr->terminates = true;
1419 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001420 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
Paolo Bonzini677e7802015-03-23 10:53:21 +01001421 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001422}
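
/* Sketch: file-backed guest RAM, e.g. on hugetlbfs; the mount point and
 * region name are illustrative. share=true maps the file MAP_SHARED so
 * external processes (such as vhost-user backends) can see guest memory.
 */
static void demo_hugepage_ram(MemoryRegion *mr, uint64_t size, Error **errp)
{
    memory_region_init_ram_from_file(mr, NULL, "pc.ram", size,
                                     true, "/dev/hugepages/guest", errp);
}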
Marc-André Lureaufea617c2017-06-02 18:12:24 +04001423
1424void memory_region_init_ram_from_fd(MemoryRegion *mr,
1425 struct Object *owner,
1426 const char *name,
1427 uint64_t size,
1428 bool share,
1429 int fd,
1430 Error **errp)
1431{
1432 memory_region_init(mr, owner, name, size);
1433 mr->ram = true;
1434 mr->terminates = true;
1435 mr->destructor = memory_region_destructor_ram;
1436 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp);
1437 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1438}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001439#endif
1440
Avi Kivity093bc2c2011-07-26 14:26:01 +03001441void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001442 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001443 const char *name,
1444 uint64_t size,
1445 void *ptr)
1446{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001447 memory_region_init(mr, owner, name, size);
Avi Kivity8ea92522011-12-08 15:58:43 +02001448 mr->ram = true;
Avi Kivity14a3c102011-07-26 14:26:06 +03001449 mr->terminates = true;
Eduardo Habkostfc3e7662015-11-06 19:20:05 -02001450 mr->destructor = memory_region_destructor_ram;
Paolo Bonzini677e7802015-03-23 10:53:21 +01001451 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
Hu Taoef701d72014-09-09 13:27:54 +08001452
1453 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1454 assert(ptr != NULL);
Fam Zheng8e41fb62016-03-01 14:18:21 +08001455 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001456}
1457
Alex Williamson21e00fa2016-10-31 09:53:03 -06001458void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1459 Object *owner,
1460 const char *name,
1461 uint64_t size,
1462 void *ptr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301463{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001464 memory_region_init_ram_ptr(mr, owner, name, size, ptr);
1465 mr->ram_device = true;
Alex Williamson4a2e2422016-10-31 09:53:03 -06001466 mr->ops = &ram_device_mem_ops;
1467 mr->opaque = mr;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301468}
1469
Avi Kivity093bc2c2011-07-26 14:26:01 +03001470void memory_region_init_alias(MemoryRegion *mr,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001471 Object *owner,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001472 const char *name,
1473 MemoryRegion *orig,
Avi Kivitya8170e52012-10-23 12:30:10 +02001474 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001475 uint64_t size)
1476{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001477 memory_region_init(mr, owner, name, size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001478 mr->alias = orig;
1479 mr->alias_offset = offset;
1480}
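
/* Sketch: an alias re-exposes part of an existing region at another
 * address without allocating new RAM. Here the low 1MiB of RAM is
 * mirrored just below 4GiB, in the style of PC firmware shadowing;
 * the addresses are illustrative.
 */
static void demo_alias_low_ram(MemoryRegion *alias, MemoryRegion *ram,
                               MemoryRegion *sysmem)
{
    memory_region_init_alias(alias, NULL, "ram-low-mirror", ram,
                             0, 0x100000);
    memory_region_add_subregion_overlap(sysmem, 0xfff00000, alias, 1);
}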
1481
Peter Maydellb59821a2017-07-07 15:42:50 +01001482void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1483 struct Object *owner,
1484 const char *name,
1485 uint64_t size,
1486 Error **errp)
Peter Maydella1777f72016-07-04 13:06:35 +01001487{
1488 memory_region_init(mr, owner, name, size);
1489 mr->ram = true;
1490 mr->readonly = true;
1491 mr->terminates = true;
1492 mr->destructor = memory_region_destructor_ram;
1493 mr->ram_block = qemu_ram_alloc(size, mr, errp);
1494 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1495}
1496
Peter Maydellb59821a2017-07-07 15:42:50 +01001497void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1498 Object *owner,
1499 const MemoryRegionOps *ops,
1500 void *opaque,
1501 const char *name,
1502 uint64_t size,
1503 Error **errp)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001504{
Peter Maydell39e0b032016-07-04 13:06:35 +01001505 assert(ops);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001506 memory_region_init(mr, owner, name, size);
Avi Kivity7bc2b9c2011-08-25 14:56:14 +03001507 mr->ops = ops;
Avi Kivity75f59412011-08-26 00:35:15 +03001508 mr->opaque = opaque;
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001509 mr->terminates = true;
Avi Kivity75c578d2012-01-02 15:40:52 +02001510 mr->rom_device = true;
Paolo Bonzini58268c82016-09-14 11:05:59 +02001511 mr->destructor = memory_region_destructor_ram;
Fam Zheng8e41fb62016-03-01 14:18:21 +08001512 mr->ram_block = qemu_ram_alloc(size, mr, errp);
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001513}
1514
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001515void memory_region_init_iommu(void *_iommu_mr,
1516 size_t instance_size,
1517 const char *mrtypename,
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001518 Object *owner,
Avi Kivity30951152012-10-30 13:47:46 +02001519 const char *name,
1520 uint64_t size)
1521{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001522 struct IOMMUMemoryRegion *iommu_mr;
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001523 struct MemoryRegion *mr;
1524
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001525 object_initialize(_iommu_mr, instance_size, mrtypename);
1526 mr = MEMORY_REGION(_iommu_mr);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001527 memory_region_do_init(mr, owner, name, size);
1528 iommu_mr = IOMMU_MEMORY_REGION(mr);
Avi Kivity30951152012-10-30 13:47:46 +02001529 mr->terminates = true; /* then re-forwards */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001530 QLIST_INIT(&iommu_mr->iommu_notify);
1531 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
Avi Kivity30951152012-10-30 13:47:46 +02001532}
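
/* Sketch: the minimum an IOMMU region implementation of this era
 * provides is a QOM type whose class installs a translate hook. All
 * names here are invented, the translation is a trivial 4KiB identity
 * map, and IOMMU_MEMORY_REGION_CLASS is assumed from "exec/memory.h".
 */
static IOMMUTLBEntry demo_iommu_translate(IOMMUMemoryRegion *iommu,
                                          hwaddr addr, IOMMUAccessFlags flag)
{
    return (IOMMUTLBEntry) {
        .target_as = &address_space_memory,
        .iova = addr & ~(hwaddr)0xfff,
        .translated_addr = addr & ~(hwaddr)0xfff,   /* identity map */
        .addr_mask = 0xfff,
        .perm = IOMMU_RW,
    };
}

static void demo_iommu_class_init(ObjectClass *klass, void *data)
{
    IOMMU_MEMORY_REGION_CLASS(klass)->translate = demo_iommu_translate;
}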
1533
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001534static void memory_region_finalize(Object *obj)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001535{
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001536 MemoryRegion *mr = MEMORY_REGION(obj);
1537
Paolo Bonzini2e2b8eb2015-10-01 10:59:50 +02001538 assert(!mr->container);
1539
 1540 /* We know the region is not visible in any address space (it
 1541 * does not have a container and cannot be a root either, because
 1542 * it has no references), so we can blindly clear mr->enabled.
1543 * memory_region_set_enabled instead could trigger a transaction
1544 * and cause an infinite loop.
1545 */
1546 mr->enabled = false;
1547 memory_region_transaction_begin();
1548 while (!QTAILQ_EMPTY(&mr->subregions)) {
1549 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1550 memory_region_del_subregion(mr, subregion);
1551 }
1552 memory_region_transaction_commit();
1553
Avi Kivity545e92e2011-08-08 19:58:48 +03001554 mr->destructor(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001555 memory_region_clear_coalescing(mr);
Peter Maydell302fa282014-08-19 20:05:46 +01001556 g_free((char *)mr->name);
Anthony Liguori7267c092011-08-20 22:09:37 -05001557 g_free(mr->ioeventfds);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001558}
1559
Paolo Bonzini803c0812013-05-07 06:59:09 +02001560Object *memory_region_owner(MemoryRegion *mr)
1561{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001562 Object *obj = OBJECT(mr);
1563 return obj->parent;
Paolo Bonzini803c0812013-05-07 06:59:09 +02001564}
1565
Paolo Bonzini46637be2013-05-07 09:06:00 +02001566void memory_region_ref(MemoryRegion *mr)
1567{
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001568 /* MMIO callbacks most likely will access data that belongs
1569 * to the owner, hence the need to ref/unref the owner whenever
1570 * the memory region is in use.
1571 *
1572 * The memory region is a child of its owner. As long as the
1573 * owner doesn't call unparent itself on the memory region,
1574 * ref-ing the owner will also keep the memory region alive.
Paolo Bonzini612263c2015-12-09 11:44:25 +01001575 * Memory regions without an owner are supposed to never go away;
 1576 * we do not ref/unref them because that would noticeably slow down DMA.
Paolo Bonzini22a893e2014-06-11 10:58:06 +02001577 */
Paolo Bonzini612263c2015-12-09 11:44:25 +01001578 if (mr && mr->owner) {
1579 object_ref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001580 }
1581}
1582
1583void memory_region_unref(MemoryRegion *mr)
1584{
Paolo Bonzini612263c2015-12-09 11:44:25 +01001585 if (mr && mr->owner) {
1586 object_unref(mr->owner);
Paolo Bonzini46637be2013-05-07 09:06:00 +02001587 }
1588}
1589
Avi Kivity093bc2c2011-07-26 14:26:01 +03001590uint64_t memory_region_size(MemoryRegion *mr)
1591{
Avi Kivity08dafab2011-10-16 13:19:17 +02001592 if (int128_eq(mr->size, int128_2_64())) {
1593 return UINT64_MAX;
1594 }
1595 return int128_get64(mr->size);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001596}
1597
Peter Crosthwaite5d546d42014-08-14 23:55:03 -07001598const char *memory_region_name(const MemoryRegion *mr)
Avi Kivity8991c792011-12-20 15:53:11 +02001599{
Peter Crosthwaited1dd32a2014-08-25 20:10:24 -07001600 if (!mr->name) {
1601 ((MemoryRegion *)mr)->name =
1602 object_get_canonical_path_component(OBJECT(mr));
1603 }
Peter Maydell302fa282014-08-19 20:05:46 +01001604 return mr->name;
Avi Kivity8991c792011-12-20 15:53:11 +02001605}
1606
Alex Williamson21e00fa2016-10-31 09:53:03 -06001607bool memory_region_is_ram_device(MemoryRegion *mr)
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301608{
Alex Williamson21e00fa2016-10-31 09:53:03 -06001609 return mr->ram_device;
Nikunj A Dadhaniae4dc3f52014-09-15 09:28:23 +05301610}
1611
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001612uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
Avi Kivity55043ba2011-12-15 17:20:34 +02001613{
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001614 uint8_t mask = mr->dirty_log_mask;
Paolo Bonziniadaad612016-09-22 16:09:08 +02001615 if (global_dirty_log && mr->ram_block) {
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01001616 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1617 }
1618 return mask;
Avi Kivity55043ba2011-12-15 17:20:34 +02001619}
1620
Paolo Bonzini2d1a35b2015-03-23 10:50:57 +01001621bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1622{
1623 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1624}
1625
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001626static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
Peter Xu5bf3d312016-09-23 13:02:27 +08001627{
1628 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1629 IOMMUNotifier *iommu_notifier;
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001630 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Peter Xu5bf3d312016-09-23 13:02:27 +08001631
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001632 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xu5bf3d312016-09-23 13:02:27 +08001633 flags |= iommu_notifier->notifier_flags;
1634 }
1635
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001636 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1637 imrc->notify_flag_changed(iommu_mr,
1638 iommu_mr->iommu_notify_flags,
1639 flags);
Peter Xu5bf3d312016-09-23 13:02:27 +08001640 }
1641
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001642 iommu_mr->iommu_notify_flags = flags;
Peter Xu5bf3d312016-09-23 13:02:27 +08001643}
1644
Peter Xucdb30812016-09-23 13:02:26 +08001645void memory_region_register_iommu_notifier(MemoryRegion *mr,
1646 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001647{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001648 IOMMUMemoryRegion *iommu_mr;
1649
Jason Wangefcd38c2016-12-30 18:09:17 +08001650 if (mr->alias) {
1651 memory_region_register_iommu_notifier(mr->alias, n);
1652 return;
1653 }
1654
Peter Xucdb30812016-09-23 13:02:26 +08001655 /* We need to register for at least one bitfield */
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001656 iommu_mr = IOMMU_MEMORY_REGION(mr);
Peter Xucdb30812016-09-23 13:02:26 +08001657 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
Peter Xu698feb52017-04-07 18:59:07 +08001658 assert(n->start <= n->end);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001659 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1660 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001661}
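
/* Sketch: a VFIO-style consumer registering for unmap events over the
 * whole address range. The callback is illustrative, and
 * iommu_notifier_init() is assumed to be the "exec/memory.h" helper of
 * this era that fills in notify/flags/start/end.
 */
static void demo_iommu_unmap_cb(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    /* tear down host mappings covering
     * [iotlb->iova, iotlb->iova + iotlb->addr_mask] */
}

static void demo_register_unmap(MemoryRegion *mr, IOMMUNotifier *n)
{
    iommu_notifier_init(n, demo_iommu_unmap_cb, IOMMU_NOTIFIER_UNMAP,
                        0, HWADDR_MAX);
    memory_region_register_iommu_notifier(mr, n);
}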
1662
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001663uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
David Gibsona788f222015-09-30 12:13:55 +10001664{
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001665 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1666
1667 if (imrc->get_min_page_size) {
1668 return imrc->get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001669 }
1670 return TARGET_PAGE_SIZE;
1671}
1672
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001673void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001674{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001675 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001676 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001677 hwaddr addr, granularity;
David Gibsona788f222015-09-30 12:13:55 +10001678 IOMMUTLBEntry iotlb;
1679
Peter Xufaa362e2017-04-07 18:59:11 +08001680 /* If the IOMMU has its own replay callback, override */
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001681 if (imrc->replay) {
1682 imrc->replay(iommu_mr, n);
Peter Xufaa362e2017-04-07 18:59:11 +08001683 return;
1684 }
1685
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001686 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
Alexey Kardashevskiyf682e9c2016-06-21 11:14:01 +10001687
David Gibsona788f222015-09-30 12:13:55 +10001688 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10001689 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE);
David Gibsona788f222015-09-30 12:13:55 +10001690 if (iotlb.perm != IOMMU_NONE) {
1691 n->notify(n, &iotlb);
1692 }
1693
1694 /* if (2^64 - MR size) < granularity, it's possible to get an
1695 * infinite loop here. This should catch such a wraparound */
1696 if ((addr + granularity) < addr) {
1697 break;
1698 }
1699 }
1700}
1701
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001702void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
Peter Xude472e42017-04-07 18:59:09 +08001703{
1704 IOMMUNotifier *notifier;
1705
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001706 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1707 memory_region_iommu_replay(iommu_mr, notifier);
Peter Xude472e42017-04-07 18:59:09 +08001708 }
1709}
1710
Peter Xucdb30812016-09-23 13:02:26 +08001711void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1712 IOMMUNotifier *n)
David Gibson06866572013-05-14 19:13:56 +10001713{
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001714 IOMMUMemoryRegion *iommu_mr;
1715
Jason Wangefcd38c2016-12-30 18:09:17 +08001716 if (mr->alias) {
1717 memory_region_unregister_iommu_notifier(mr->alias, n);
1718 return;
1719 }
Peter Xucdb30812016-09-23 13:02:26 +08001720 QLIST_REMOVE(n, node);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001721 iommu_mr = IOMMU_MEMORY_REGION(mr);
1722 memory_region_update_iommu_notify_flags(iommu_mr);
David Gibson06866572013-05-14 19:13:56 +10001723}
1724
Peter Xubd2bfa42017-04-07 18:59:10 +08001725void memory_region_notify_one(IOMMUNotifier *notifier,
1726 IOMMUTLBEntry *entry)
David Gibson06866572013-05-14 19:13:56 +10001727{
Peter Xucdb30812016-09-23 13:02:26 +08001728 IOMMUNotifierFlag request_flags;
1729
Peter Xubd2bfa42017-04-07 18:59:10 +08001730 /*
 1731 * Skip the notification if it does not overlap with the
 1732 * registered range.
1733 */
1734 if (notifier->start > entry->iova + entry->addr_mask + 1 ||
1735 notifier->end < entry->iova) {
1736 return;
1737 }
Peter Xucdb30812016-09-23 13:02:26 +08001738
Peter Xubd2bfa42017-04-07 18:59:10 +08001739 if (entry->perm & IOMMU_RW) {
Peter Xucdb30812016-09-23 13:02:26 +08001740 request_flags = IOMMU_NOTIFIER_MAP;
1741 } else {
1742 request_flags = IOMMU_NOTIFIER_UNMAP;
1743 }
1744
Peter Xubd2bfa42017-04-07 18:59:10 +08001745 if (notifier->notifier_flags & request_flags) {
1746 notifier->notify(notifier, entry);
1747 }
1748}
1749
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001750void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
Peter Xubd2bfa42017-04-07 18:59:10 +08001751 IOMMUTLBEntry entry)
1752{
1753 IOMMUNotifier *iommu_notifier;
1754
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001755 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
Peter Xubd2bfa42017-04-07 18:59:10 +08001756
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10001757 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
Peter Xubd2bfa42017-04-07 18:59:10 +08001758 memory_region_notify_one(iommu_notifier, &entry);
Peter Xucdb30812016-09-23 13:02:26 +08001759 }
David Gibson06866572013-05-14 19:13:56 +10001760}
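
/* Sketch: how an IOMMU model might publish an invalidation. perm ==
 * IOMMU_NONE makes memory_region_notify_one() above deliver it as an
 * UNMAP event. Field values are illustrative.
 */
static void demo_publish_unmap(IOMMUMemoryRegion *iommu_mr, hwaddr iova)
{
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = iova & ~(hwaddr)0xfff,
        .translated_addr = 0,
        .addr_mask = 0xfff,               /* one 4KiB page */
        .perm = IOMMU_NONE,
    };

    memory_region_notify_iommu(iommu_mr, entry);
}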
1761
Avi Kivity093bc2c2011-07-26 14:26:01 +03001762void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
1763{
Avi Kivity5a583342011-07-26 14:26:02 +03001764 uint8_t mask = 1 << client;
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001765 uint8_t old_logging;
Avi Kivity5a583342011-07-26 14:26:02 +03001766
Paolo Bonzinidbddac62015-03-23 10:31:53 +01001767 assert(client == DIRTY_MEMORY_VGA);
Paolo Bonzinideb809e2015-07-14 13:56:53 +02001768 old_logging = mr->vga_logging_count;
1769 mr->vga_logging_count += log ? 1 : -1;
1770 if (!!old_logging == !!mr->vga_logging_count) {
1771 return;
1772 }
1773
Jan Kiszka59023ef2012-08-23 13:02:30 +02001774 memory_region_transaction_begin();
Avi Kivity5a583342011-07-26 14:26:02 +03001775 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
Jan Kiszka22bde712012-11-05 16:45:56 +01001776 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001777 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001778}
1779
Avi Kivitya8170e52012-10-23 12:30:10 +02001780bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1781 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001782{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001783 assert(mr->ram_block);
1784 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
1785 size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001786}
1787
Avi Kivitya8170e52012-10-23 12:30:10 +02001788void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1789 hwaddr size)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001790{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001791 assert(mr->ram_block);
1792 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
1793 size,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001794 memory_region_get_dirty_log_mask(mr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001795}
1796
Juan Quintela6c279db2012-10-17 20:24:28 +02001797bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1798 hwaddr size, unsigned client)
1799{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001800 assert(mr->ram_block);
1801 return cpu_physical_memory_test_and_clear_dirty(
1802 memory_region_get_ram_addr(mr) + addr, size, client);
Juan Quintela6c279db2012-10-17 20:24:28 +02001803}
1804
Gerd Hoffmann8deaf122017-04-21 11:16:25 +02001805DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1806 hwaddr addr,
1807 hwaddr size,
1808 unsigned client)
1809{
1810 assert(mr->ram_block);
1811 return cpu_physical_memory_snapshot_and_clear_dirty(
1812 memory_region_get_ram_addr(mr) + addr, size, client);
1813}
1814
1815bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
1816 hwaddr addr, hwaddr size)
1817{
1818 assert(mr->ram_block);
1819 return cpu_physical_memory_snapshot_get_dirty(snap,
1820 memory_region_get_ram_addr(mr) + addr, size);
1821}
Juan Quintela6c279db2012-10-17 20:24:28 +02001822
Avi Kivity093bc2c2011-07-26 14:26:01 +03001823void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
1824{
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001825 MemoryListener *listener;
Avi Kivity0d673e32012-10-02 15:28:50 +02001826 AddressSpace *as;
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001827 FlatView *view;
Avi Kivity5a583342011-07-26 14:26:02 +03001828 FlatRange *fr;
1829
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001830 /* If the same address space has multiple log_sync listeners, we
1831 * visit that address space's FlatView multiple times. But because
1832 * log_sync listeners are rare, it's still cheaper than walking each
1833 * address space once.
1834 */
1835 QTAILQ_FOREACH(listener, &memory_listeners, link) {
1836 if (!listener->log_sync) {
1837 continue;
1838 }
1839 as = listener->address_space;
1840 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001841 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity0d673e32012-10-02 15:28:50 +02001842 if (fr->mr == mr) {
Paolo Bonzini0a752ee2016-09-23 11:08:54 +02001843 MemoryRegionSection mrs = section_from_flat_range(fr, as);
1844 listener->log_sync(listener, &mrs);
Avi Kivity0d673e32012-10-02 15:28:50 +02001845 }
Avi Kivity5a583342011-07-26 14:26:02 +03001846 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001847 flatview_unref(view);
Avi Kivity5a583342011-07-26 14:26:02 +03001848 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001849}
1850
1851void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
1852{
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001853 if (mr->readonly != readonly) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001854 memory_region_transaction_begin();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001855 mr->readonly = readonly;
Jan Kiszka22bde712012-11-05 16:45:56 +01001856 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001857 memory_region_transaction_commit();
Avi Kivityfb1cd6f2011-09-25 14:48:47 +03001858 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03001859}
1860
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001861void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001862{
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001863 if (mr->romd_mode != romd_mode) {
Jan Kiszka59023ef2012-08-23 13:02:30 +02001864 memory_region_transaction_begin();
Jan Kiszka5f9a5ea2013-05-07 19:04:25 +02001865 mr->romd_mode = romd_mode;
Jan Kiszka22bde712012-11-05 16:45:56 +01001866 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02001867 memory_region_transaction_commit();
Avi Kivityd0a9b5b2011-08-08 19:58:49 +03001868 }
1869}
1870
Avi Kivitya8170e52012-10-23 12:30:10 +02001871void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1872 hwaddr size, unsigned client)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001873{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001874 assert(mr->ram_block);
1875 cpu_physical_memory_test_and_clear_dirty(
1876 memory_region_get_ram_addr(mr) + addr, size, client);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001877}
1878
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001879int memory_region_get_fd(MemoryRegion *mr)
1880{
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001881 int fd;
1882
1883 rcu_read_lock();
1884 while (mr->alias) {
1885 mr = mr->alias;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001886 }
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001887 fd = mr->ram_block->fd;
1888 rcu_read_unlock();
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001889
Paolo Bonzini4ff87572016-03-25 12:30:16 +01001890 return fd;
1891}
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001892
Avi Kivity093bc2c2011-07-26 14:26:01 +03001893void *memory_region_get_ram_ptr(MemoryRegion *mr)
1894{
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001895 void *ptr;
1896 uint64_t offset = 0;
1897
1898 rcu_read_lock();
1899 while (mr->alias) {
1900 offset += mr->alias_offset;
1901 mr = mr->alias;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001902 }
Fam Zheng8e41fb62016-03-01 14:18:21 +08001903 assert(mr->ram_block);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001904 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001905 rcu_read_unlock();
Avi Kivity093bc2c2011-07-26 14:26:01 +03001906
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001907 return ptr;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001908}
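
/* Sketch: direct host access into guest RAM. The caller must stay within
 * the region's bounds and mark written ranges dirty so migration and TCG
 * observe the change; the helper name is illustrative.
 */
static void demo_patch_ram(MemoryRegion *ram, hwaddr offset,
                           const void *blob, size_t len)
{
    uint8_t *host = memory_region_get_ram_ptr(ram);

    memcpy(host + offset, blob, len);
    memory_region_set_dirty(ram, offset, len);
}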
1909
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001910MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
1911{
1912 RAMBlock *block;
1913
1914 block = qemu_ram_block_from_host(ptr, false, offset);
1915 if (!block) {
1916 return NULL;
1917 }
1918
1919 return block->mr;
1920}
1921
Fam Zheng7ebb2742016-03-01 14:18:20 +08001922ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
1923{
1924 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
1925}
1926
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001927void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
1928{
Fam Zheng8e41fb62016-03-01 14:18:21 +08001929 assert(mr->ram_block);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001930
Gongleifa53a0e2016-05-10 10:04:59 +08001931 qemu_ram_resize(mr->ram_block, newsize, errp);
Paolo Bonzini37d7c082015-03-23 10:21:46 +01001932}
1933
Avi Kivity0d673e32012-10-02 15:28:50 +02001934static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
Avi Kivity093bc2c2011-07-26 14:26:01 +03001935{
Paolo Bonzini99e86342013-05-06 10:26:13 +02001936 FlatView *view;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001937 FlatRange *fr;
1938 CoalescedMemoryRange *cmr;
1939 AddrRange tmp;
Avi Kivity95d29942012-10-02 18:21:54 +02001940 MemoryRegionSection section;
Avi Kivity093bc2c2011-07-26 14:26:01 +03001941
Paolo Bonzini856d7242013-05-06 11:57:21 +02001942 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02001943 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03001944 if (fr->mr == mr) {
Avi Kivity95d29942012-10-02 18:21:54 +02001945 section = (MemoryRegionSection) {
Avi Kivityf6790af2012-10-02 20:13:51 +02001946 .address_space = as,
Avi Kivity95d29942012-10-02 18:21:54 +02001947 .offset_within_address_space = int128_get64(fr->addr.start),
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001948 .size = fr->addr.size,
Avi Kivity95d29942012-10-02 18:21:54 +02001949 };
1950
Paolo Bonzini9a546352016-09-22 16:23:06 +02001951 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02001952 int128_get64(fr->addr.start),
1953 int128_get64(fr->addr.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001954 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
1955 tmp = addrrange_shift(cmr->addr,
Avi Kivity08dafab2011-10-16 13:19:17 +02001956 int128_sub(fr->addr.start,
1957 int128_make64(fr->offset_in_region)));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001958 if (!addrrange_intersects(tmp, fr->addr)) {
1959 continue;
1960 }
1961 tmp = addrrange_intersection(tmp, fr->addr);
Paolo Bonzini9a546352016-09-22 16:23:06 +02001962 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
Avi Kivity95d29942012-10-02 18:21:54 +02001963 int128_get64(tmp.start),
1964 int128_get64(tmp.size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001965 }
1966 }
1967 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02001968 flatview_unref(view);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001969}
1970
Avi Kivity0d673e32012-10-02 15:28:50 +02001971static void memory_region_update_coalesced_range(MemoryRegion *mr)
1972{
1973 AddressSpace *as;
1974
1975 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1976 memory_region_update_coalesced_range_as(mr, as);
1977 }
1978}
1979
Avi Kivity093bc2c2011-07-26 14:26:01 +03001980void memory_region_set_coalescing(MemoryRegion *mr)
1981{
1982 memory_region_clear_coalescing(mr);
Avi Kivity08dafab2011-10-16 13:19:17 +02001983 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001984}
1985
1986void memory_region_add_coalescing(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02001987 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03001988 uint64_t size)
1989{
Anthony Liguori7267c092011-08-20 22:09:37 -05001990 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001991
Avi Kivity08dafab2011-10-16 13:19:17 +02001992 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
Avi Kivity093bc2c2011-07-26 14:26:01 +03001993 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
1994 memory_region_update_coalesced_range(mr);
Jan Kiszkad4105152012-08-23 13:02:29 +02001995 memory_region_set_flush_coalesced(mr);
Avi Kivity093bc2c2011-07-26 14:26:01 +03001996}
1997
1998void memory_region_clear_coalescing(MemoryRegion *mr)
1999{
2000 CoalescedMemoryRange *cmr;
Fam Zhengab5b3db2014-06-13 14:34:41 +08002001 bool updated = false;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002002
Jan Kiszkad4105152012-08-23 13:02:29 +02002003 qemu_flush_coalesced_mmio_buffer();
2004 mr->flush_coalesced_mmio = false;
2005
Avi Kivity093bc2c2011-07-26 14:26:01 +03002006 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2007 cmr = QTAILQ_FIRST(&mr->coalesced);
2008 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002009 g_free(cmr);
Fam Zhengab5b3db2014-06-13 14:34:41 +08002010 updated = true;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002011 }
Fam Zhengab5b3db2014-06-13 14:34:41 +08002012
2013 if (updated) {
2014 memory_region_update_coalesced_range(mr);
2015 }
Avi Kivity093bc2c2011-07-26 14:26:01 +03002016}
2017
Jan Kiszkad4105152012-08-23 13:02:29 +02002018void memory_region_set_flush_coalesced(MemoryRegion *mr)
2019{
2020 mr->flush_coalesced_mmio = true;
2021}
2022
2023void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2024{
2025 qemu_flush_coalesced_mmio_buffer();
2026 if (QTAILQ_EMPTY(&mr->coalesced)) {
2027 mr->flush_coalesced_mmio = false;
2028 }
2029}
2030
Jan Kiszka196ea132015-06-18 18:47:20 +02002031void memory_region_set_global_locking(MemoryRegion *mr)
2032{
2033 mr->global_locking = true;
2034}
2035
2036void memory_region_clear_global_locking(MemoryRegion *mr)
2037{
2038 mr->global_locking = false;
2039}
2040
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002041static bool userspace_eventfd_warning;
2042
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002043void memory_region_add_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002044 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002045 unsigned size,
2046 bool match_data,
2047 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002048 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002049{
2050 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002051 .addr.start = int128_make64(addr),
2052 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002053 .match_data = match_data,
2054 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002055 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002056 };
2057 unsigned i;
2058
Pavel Fedin8c56c1a2015-11-20 12:37:16 +03002059 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2060 userspace_eventfd_warning))) {
2061 userspace_eventfd_warning = true;
2062 error_report("Using eventfd without MMIO binding in KVM. "
2063 "Suboptimal performance expected");
2064 }
2065
Jason Wangb8aecea2015-11-06 16:02:45 +08002066 if (size) {
2067 adjust_endianness(mr, &mrfd.data, size);
2068 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002069 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002070 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2071 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
2072 break;
2073 }
2074 }
2075 ++mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002076 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002077 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2078 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2079 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2080 mr->ioeventfds[i] = mrfd;
Gonglei4dc56152014-05-08 11:47:32 +08002081 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002082 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002083}
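
/* Sketch: a virtio-style doorbell. After this call, a 2-byte guest write
 * of vq_idx at offset 0x50 signals the notifier -- in KVM when available,
 * otherwise via the userspace fallback in memory_region_dispatch_write()
 * above. The offset and access size are illustrative.
 */
static void demo_wire_doorbell(MemoryRegion *mr, EventNotifier *e,
                               uint16_t vq_idx)
{
    event_notifier_init(e, 0);            /* from qemu/event_notifier.h */
    memory_region_add_eventfd(mr, 0x50, 2, true, vq_idx, e);
}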
2084
2085void memory_region_del_eventfd(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002086 hwaddr addr,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002087 unsigned size,
2088 bool match_data,
2089 uint64_t data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002090 EventNotifier *e)
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002091{
2092 MemoryRegionIoeventfd mrfd = {
Avi Kivity08dafab2011-10-16 13:19:17 +02002093 .addr.start = int128_make64(addr),
2094 .addr.size = int128_make64(size),
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002095 .match_data = match_data,
2096 .data = data,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02002097 .e = e,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002098 };
2099 unsigned i;
2100
Jason Wangb8aecea2015-11-06 16:02:45 +08002101 if (size) {
2102 adjust_endianness(mr, &mrfd.data, size);
2103 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002104 memory_region_transaction_begin();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002105 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2106 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
2107 break;
2108 }
2109 }
2110 assert(i != mr->ioeventfd_nb);
2111 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2112 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2113 --mr->ioeventfd_nb;
Anthony Liguori7267c092011-08-20 22:09:37 -05002114 mr->ioeventfds = g_realloc(mr->ioeventfds,
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002115 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
Gonglei4dc56152014-05-08 11:47:32 +08002116 ioeventfd_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002117 memory_region_transaction_commit();
Avi Kivity3e9d69e2011-07-26 14:26:11 +03002118}
2119
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002120static void memory_region_update_container_subregions(MemoryRegion *subregion)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002121{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002122 MemoryRegion *mr = subregion->container;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002123 MemoryRegion *other;
2124
Jan Kiszka59023ef2012-08-23 13:02:30 +02002125 memory_region_transaction_begin();
2126
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002127 memory_region_ref(subregion);
Avi Kivity093bc2c2011-07-26 14:26:01 +03002128 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
Avi Kivity093bc2c2011-07-26 14:26:01 +03002129 if (subregion->priority >= other->priority) {
2130 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2131 goto done;
2132 }
2133 }
2134 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2135done:
Jan Kiszka22bde712012-11-05 16:45:56 +01002136 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002137 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002138}
2139
Peter Crosthwaite05987012014-06-05 23:14:44 -07002140static void memory_region_add_subregion_common(MemoryRegion *mr,
2141 hwaddr offset,
2142 MemoryRegion *subregion)
2143{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002144 assert(!subregion->container);
2145 subregion->container = mr;
Peter Crosthwaite05987012014-06-05 23:14:44 -07002146 subregion->addr = offset;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002147 memory_region_update_container_subregions(subregion);
Peter Crosthwaite05987012014-06-05 23:14:44 -07002148}
Avi Kivity093bc2c2011-07-26 14:26:01 +03002149
2150void memory_region_add_subregion(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002151 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002152 MemoryRegion *subregion)
2153{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002154 subregion->priority = 0;
2155 memory_region_add_subregion_common(mr, offset, subregion);
2156}
2157
2158void memory_region_add_subregion_overlap(MemoryRegion *mr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002159 hwaddr offset,
Avi Kivity093bc2c2011-07-26 14:26:01 +03002160 MemoryRegion *subregion,
Marcel Apfelbauma1ff8ae2013-09-16 11:21:14 +03002161 int priority)
Avi Kivity093bc2c2011-07-26 14:26:01 +03002162{
Avi Kivity093bc2c2011-07-26 14:26:01 +03002163 subregion->priority = priority;
2164 memory_region_add_subregion_common(mr, offset, subregion);
2165}
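
/* Sketch: priorities resolve overlaps, so a ROM window added with a
 * higher priority shadows the part of RAM that was mapped with the
 * default priority 0; the addresses are illustrative.
 */
static void demo_layout(MemoryRegion *sysmem, MemoryRegion *ram,
                        MemoryRegion *rom)
{
    memory_region_add_subregion(sysmem, 0x00000000, ram);   /* prio 0 */
    memory_region_add_subregion_overlap(sysmem, 0xfffc0000, rom, 10);
}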
2166
2167void memory_region_del_subregion(MemoryRegion *mr,
2168 MemoryRegion *subregion)
2169{
Jan Kiszka59023ef2012-08-23 13:02:30 +02002170 memory_region_transaction_begin();
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002171 assert(subregion->container == mr);
2172 subregion->container = NULL;
Avi Kivity093bc2c2011-07-26 14:26:01 +03002173 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02002174 memory_region_unref(subregion);
Jan Kiszka22bde712012-11-05 16:45:56 +01002175 memory_region_update_pending |= mr->enabled && subregion->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002176 memory_region_transaction_commit();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002177}
2178
2179void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2180{
2181 if (enabled == mr->enabled) {
2182 return;
2183 }
Jan Kiszka59023ef2012-08-23 13:02:30 +02002184 memory_region_transaction_begin();
Avi Kivity6bba19b2011-09-14 11:54:58 +03002185 mr->enabled = enabled;
Jan Kiszka22bde712012-11-05 16:45:56 +01002186 memory_region_update_pending = true;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002187 memory_region_transaction_commit();
Avi Kivity093bc2c2011-07-26 14:26:01 +03002188}
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002189
Michael S. Tsirkine7af4c62014-12-16 11:21:23 +02002190void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2191{
2192 Int128 s = int128_make64(size);
2193
2194 if (size == UINT64_MAX) {
2195 s = int128_2_64();
2196 }
2197 if (int128_eq(s, mr->size)) {
2198 return;
2199 }
2200 memory_region_transaction_begin();
2201 mr->size = s;
2202 memory_region_update_pending = true;
2203 memory_region_transaction_commit();
2204}
2205
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002206static void memory_region_readd_subregion(MemoryRegion *mr)
Avi Kivity2282e1a2011-09-14 12:10:12 +03002207{
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002208 MemoryRegion *container = mr->container;
Avi Kivity2282e1a2011-09-14 12:10:12 +03002209
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002210 if (container) {
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002211 memory_region_transaction_begin();
2212 memory_region_ref(mr);
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002213 memory_region_del_subregion(container, mr);
2214 mr->container = container;
2215 memory_region_update_container_subregions(mr);
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002216 memory_region_unref(mr);
2217 memory_region_transaction_commit();
Avi Kivity2282e1a2011-09-14 12:10:12 +03002218 }
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002219}
Avi Kivity2282e1a2011-09-14 12:10:12 +03002220
Peter Crosthwaite67891b82014-06-05 23:15:18 -07002221void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2222{
2223 if (addr != mr->addr) {
2224 mr->addr = addr;
2225 memory_region_readd_subregion(mr);
2226 }
Avi Kivity2282e1a2011-09-14 12:10:12 +03002227}
2228
Avi Kivitya8170e52012-10-23 12:30:10 +02002229void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
Avi Kivity47033592011-12-04 19:16:50 +02002230{
Avi Kivity47033592011-12-04 19:16:50 +02002231 assert(mr->alias);
Avi Kivity47033592011-12-04 19:16:50 +02002232
Jan Kiszka59023ef2012-08-23 13:02:30 +02002233 if (offset == mr->alias_offset) {
Avi Kivity47033592011-12-04 19:16:50 +02002234 return;
2235 }
2236
Jan Kiszka59023ef2012-08-23 13:02:30 +02002237 memory_region_transaction_begin();
2238 mr->alias_offset = offset;
Jan Kiszka22bde712012-11-05 16:45:56 +01002239 memory_region_update_pending |= mr->enabled;
Jan Kiszka59023ef2012-08-23 13:02:30 +02002240 memory_region_transaction_commit();
Avi Kivity47033592011-12-04 19:16:50 +02002241}
2242
Igor Mammedova2b257d2014-10-31 16:38:37 +00002243uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2244{
2245 return mr->align;
2246}
2247
Avi Kivitye2177952011-12-08 15:00:18 +02002248static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2249{
2250 const AddrRange *addr = addr_;
2251 const FlatRange *fr = fr_;
2252
2253 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2254 return -1;
2255 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2256 return 1;
2257 }
2258 return 0;
2259}
2260
Paolo Bonzini99e86342013-05-06 10:26:13 +02002261static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
Avi Kivitye2177952011-12-08 15:00:18 +02002262{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002263 return bsearch(&addr, view->ranges, view->nr,
Avi Kivitye2177952011-12-08 15:00:18 +02002264 sizeof(FlatRange), cmp_flatrange_addr);
2265}
2266
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002267bool memory_region_is_mapped(MemoryRegion *mr)
2268{
2269 return mr->container ? true : false;
2270}
2271
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002272/* Same as memory_region_find, but it does not add a reference to the
2273 * returned region. It must be called from an RCU critical section.
2274 */
2275static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2276 hwaddr addr, uint64_t size)
Avi Kivitye2177952011-12-08 15:00:18 +02002277{
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002278 MemoryRegionSection ret = { .mr = NULL };
Paolo Bonzini73034e92013-05-07 15:48:28 +02002279 MemoryRegion *root;
2280 AddressSpace *as;
2281 AddrRange range;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002282 FlatView *view;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002283 FlatRange *fr;
Avi Kivitye2177952011-12-08 15:00:18 +02002284
Paolo Bonzini73034e92013-05-07 15:48:28 +02002285 addr += mr->addr;
Paolo Bonzinifeca4ac2014-06-11 11:18:09 +02002286 for (root = mr; root->container; ) {
2287 root = root->container;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002288 addr += root->addr;
2289 }
2290
2291 as = memory_region_to_address_space(root);
Igor Mammedoveed2bac2014-06-02 15:25:06 +02002292 if (!as) {
2293 return ret;
2294 }
Paolo Bonzini73034e92013-05-07 15:48:28 +02002295 range = addrrange_make(int128_make64(addr), int128_make64(size));
Paolo Bonzini99e86342013-05-06 10:26:13 +02002296
Paolo Bonzini2b647662013-05-17 12:40:44 +02002297 view = atomic_rcu_read(&as->current_map);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002298 fr = flatview_lookup(view, range);
Avi Kivitye2177952011-12-08 15:00:18 +02002299 if (!fr) {
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002300 return ret;
Avi Kivitye2177952011-12-08 15:00:18 +02002301 }
2302
Paolo Bonzini99e86342013-05-06 10:26:13 +02002303 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
Avi Kivitye2177952011-12-08 15:00:18 +02002304 --fr;
2305 }
2306
2307 ret.mr = fr->mr;
Paolo Bonzini73034e92013-05-07 15:48:28 +02002308 ret.address_space = as;
Avi Kivitye2177952011-12-08 15:00:18 +02002309 range = addrrange_intersection(range, fr->addr);
2310 ret.offset_within_region = fr->offset_in_region;
2311 ret.offset_within_region += int128_get64(int128_sub(range.start,
2312 fr->addr.start));
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002313 ret.size = range.size;
Avi Kivitye2177952011-12-08 15:00:18 +02002314 ret.offset_within_address_space = int128_get64(range.start);
Avi Kivity7a8499e2012-02-08 17:01:23 +02002315 ret.readonly = fr->readonly;
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002316 return ret;
2317}
2318
2319MemoryRegionSection memory_region_find(MemoryRegion *mr,
2320 hwaddr addr, uint64_t size)
2321{
2322 MemoryRegionSection ret;
2323 rcu_read_lock();
2324 ret = memory_region_find_rcu(mr, addr, size);
2325 if (ret.mr) {
2326 memory_region_ref(ret.mr);
2327 }
Paolo Bonzini2b647662013-05-17 12:40:44 +02002328 rcu_read_unlock();
Avi Kivitye2177952011-12-08 15:00:18 +02002329 return ret;
2330}
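
/* Sketch: memory_region_find() returns a referenced section, so callers
 * must unref the region when done; the probed width of 4 bytes is
 * illustrative.
 */
static bool demo_probe(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(root, addr, 4);
    bool mapped = sec.mr != NULL;

    if (sec.mr) {
        /* sec.offset_within_region, sec.size, ... are valid here */
        memory_region_unref(sec.mr);
    }
    return mapped;
}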
2331
Paolo Bonzinic6742b12015-07-14 13:45:34 +02002332bool memory_region_present(MemoryRegion *container, hwaddr addr)
2333{
2334 MemoryRegion *mr;
2335
2336 rcu_read_lock();
2337 mr = memory_region_find_rcu(container, addr, 1).mr;
2338 rcu_read_unlock();
2339 return mr && mr != container;
2340}
2341
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002342void memory_global_dirty_log_sync(void)
Avi Kivity86e775c2011-12-15 16:24:49 +02002343{
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002344 MemoryListener *listener;
2345 AddressSpace *as;
Paolo Bonzini99e86342013-05-06 10:26:13 +02002346 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002347 FlatRange *fr;
2348
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002349 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2350 if (!listener->log_sync) {
2351 continue;
2352 }
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002353 as = listener->address_space;
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002354 view = address_space_get_flatview(as);
2355 FOR_EACH_FLAT_RANGE(fr, view) {
Paolo Bonziniadaad612016-09-22 16:09:08 +02002356 if (fr->dirty_log_mask) {
2357 MemoryRegionSection mrs = section_from_flat_range(fr, as);
2358 listener->log_sync(listener, &mrs);
2359 }
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02002360 }
2361 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002362 }
2363}
2364
Jay Zhou19310762017-07-28 18:28:53 +08002365static VMChangeStateEntry *vmstate_change;
2366
Avi Kivity7664e802011-12-11 14:47:25 +02002367void memory_global_dirty_log_start(void)
2368{
Jay Zhou19310762017-07-28 18:28:53 +08002369 if (vmstate_change) {
2370 qemu_del_vm_change_state_handler(vmstate_change);
2371 vmstate_change = NULL;
2372 }
2373
Avi Kivity7664e802011-12-11 14:47:25 +02002374 global_dirty_log = true;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002375
Avi Kivity7376e582012-02-08 21:05:17 +02002376 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002377
2378 /* Refresh DIRTY_LOG_MIGRATION bit. */
2379 memory_region_transaction_begin();
2380 memory_region_update_pending = true;
2381 memory_region_transaction_commit();
Avi Kivity7664e802011-12-11 14:47:25 +02002382}
2383
Jay Zhou19310762017-07-28 18:28:53 +08002384static void memory_global_dirty_log_do_stop(void)
Avi Kivity7664e802011-12-11 14:47:25 +02002385{
Avi Kivity7664e802011-12-11 14:47:25 +02002386 global_dirty_log = false;
Paolo Bonzini6f6a5ef2015-03-23 10:57:21 +01002387
2388 /* Refresh DIRTY_LOG_MIGRATION bit. */
2389 memory_region_transaction_begin();
2390 memory_region_update_pending = true;
2391 memory_region_transaction_commit();
2392
Avi Kivity7376e582012-02-08 21:05:17 +02002393 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
Avi Kivity7664e802011-12-11 14:47:25 +02002394}
2395
Jay Zhou19310762017-07-28 18:28:53 +08002396static void memory_vm_change_state_handler(void *opaque, int running,
2397 RunState state)
2398{
2399 if (running) {
2400 memory_global_dirty_log_do_stop();
2401
2402 if (vmstate_change) {
2403 qemu_del_vm_change_state_handler(vmstate_change);
2404 vmstate_change = NULL;
2405 }
2406 }
2407}
2408
2409void memory_global_dirty_log_stop(void)
2410{
2411 if (!runstate_is_running()) {
2412 if (vmstate_change) {
2413 return;
2414 }
2415 vmstate_change = qemu_add_vm_change_state_handler(
2416 memory_vm_change_state_handler, NULL);
2417 return;
2418 }
2419
2420 memory_global_dirty_log_do_stop();
2421}
2422
Avi Kivity7664e802011-12-11 14:47:25 +02002423static void listener_add_address_space(MemoryListener *listener,
2424 AddressSpace *as)
2425{
Paolo Bonzini99e86342013-05-06 10:26:13 +02002426 FlatView *view;
Avi Kivity7664e802011-12-11 14:47:25 +02002427 FlatRange *fr;
2428
Paolo Bonzini680a4782015-11-02 09:23:52 +01002429 if (listener->begin) {
2430 listener->begin(listener);
2431 }
Avi Kivity7664e802011-12-11 14:47:25 +02002432 if (global_dirty_log) {
Avi Kivity975aefe2012-10-02 16:39:57 +02002433 if (listener->log_global_start) {
2434 listener->log_global_start(listener);
2435 }
Avi Kivity7664e802011-12-11 14:47:25 +02002436 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002437
Paolo Bonzini856d7242013-05-06 11:57:21 +02002438 view = address_space_get_flatview(as);
Paolo Bonzini99e86342013-05-06 10:26:13 +02002439 FOR_EACH_FLAT_RANGE(fr, view) {
Avi Kivity7664e802011-12-11 14:47:25 +02002440 MemoryRegionSection section = {
2441 .mr = fr->mr,
Avi Kivityf6790af2012-10-02 20:13:51 +02002442 .address_space = as,
Avi Kivity7664e802011-12-11 14:47:25 +02002443 .offset_within_region = fr->offset_in_region,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002444 .size = fr->addr.size,
Avi Kivity7664e802011-12-11 14:47:25 +02002445 .offset_within_address_space = int128_get64(fr->addr.start),
Avi Kivity7a8499e2012-02-08 17:01:23 +02002446 .readonly = fr->readonly,
Avi Kivity7664e802011-12-11 14:47:25 +02002447 };
Paolo Bonzini680a4782015-11-02 09:23:52 +01002448 if (fr->dirty_log_mask && listener->log_start) {
2449 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2450 }
Avi Kivity975aefe2012-10-02 16:39:57 +02002451 if (listener->region_add) {
2452 listener->region_add(listener, &section);
2453 }
Avi Kivity7664e802011-12-11 14:47:25 +02002454 }
Paolo Bonzini680a4782015-11-02 09:23:52 +01002455 if (listener->commit) {
2456 listener->commit(listener);
2457 }
Paolo Bonzini856d7242013-05-06 11:57:21 +02002458 flatview_unref(view);
Avi Kivity7664e802011-12-11 14:47:25 +02002459}
2460
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002461void memory_listener_register(MemoryListener *listener, AddressSpace *as)
Avi Kivity7664e802011-12-11 14:47:25 +02002462{
Avi Kivity72e22d22012-02-08 15:05:50 +02002463 MemoryListener *other = NULL;
2464
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002465 listener->address_space = as;
Avi Kivity72e22d22012-02-08 15:05:50 +02002466 if (QTAILQ_EMPTY(&memory_listeners)
2467 || listener->priority >= QTAILQ_LAST(&memory_listeners,
2468 memory_listeners)->priority) {
2469 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2470 } else {
2471 QTAILQ_FOREACH(other, &memory_listeners, link) {
2472 if (listener->priority < other->priority) {
2473 break;
2474 }
2475 }
2476 QTAILQ_INSERT_BEFORE(other, listener, link);
2477 }
Avi Kivity0d673e32012-10-02 15:28:50 +02002478
Paolo Bonzini9a546352016-09-22 16:23:06 +02002479 if (QTAILQ_EMPTY(&as->listeners)
2480 || listener->priority >= QTAILQ_LAST(&as->listeners,
2481 memory_listeners)->priority) {
2482 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2483 } else {
2484 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2485 if (listener->priority < other->priority) {
2486 break;
2487 }
2488 }
2489 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2490 }
2491
Paolo Bonzinid45fa782016-09-22 16:11:54 +02002492 listener_add_address_space(listener, as);
Avi Kivity7664e802011-12-11 14:47:25 +02002493}
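
/* Sketch: a minimal listener observing one address space; the callback
 * body and the priority value are illustrative. Because of the sorted
 * insert above, lower-priority listeners see region_add first.
 */
static void demo_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* e.g. mirror section->offset_within_address_space into a host map */
}

static MemoryListener demo_listener = {
    .region_add = demo_region_add,
    .priority = 10,
};

static void demo_attach_listener(void)
{
    memory_listener_register(&demo_listener, &address_space_memory);
}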
2494
2495void memory_listener_unregister(MemoryListener *listener)
2496{
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002497 if (!listener->address_space) {
2498 return;
2499 }
2500
Avi Kivity72e22d22012-02-08 15:05:50 +02002501 QTAILQ_REMOVE(&memory_listeners, listener, link);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002502 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
Paolo Bonzini1d8280c2017-01-27 16:40:12 +01002503 listener->address_space = NULL;
Avi Kivity86e775c2011-12-15 16:24:49 +02002504}
Avi Kivitye2177952011-12-08 15:00:18 +02002505
KONRAD Fredericc9356742016-10-19 15:06:49 +02002506bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr)
2507{
2508 void *host;
2509 unsigned size = 0;
2510 unsigned offset = 0;
2511 Object *new_interface;
2512
2513 if (!mr || !mr->ops->request_ptr) {
2514 return false;
2515 }
2516
2517 /*
 2518 * Avoid an update if the request_ptr callback invokes
 2519 * memory_region_invalidate_mmio_ptr, which is likely when we use
 2520 * a cache.
2521 */
2522 memory_region_transaction_begin();
2523
2524 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset);
2525
2526 if (!host || !size) {
2527 memory_region_transaction_commit();
2528 return false;
2529 }
2530
2531 new_interface = object_new("mmio_interface");
2532 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset);
2533 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1);
2534 qdev_prop_set_bit(DEVICE(new_interface), "ro", true);
2535 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host);
2536 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr);
2537 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL);
2538
2539 memory_region_transaction_commit();
2540 return true;
2541}
2542
2543typedef struct MMIOPtrInvalidate {
2544 MemoryRegion *mr;
2545 hwaddr offset;
2546 unsigned size;
2547 int busy;
2548 int allocated;
2549} MMIOPtrInvalidate;
2550
2551#define MAX_MMIO_INVALIDATE 10
2552static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE];
2553
2554static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu,
2555 run_on_cpu_data data)
2556{
2557 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr;
2558 MemoryRegion *mr = invalidate_data->mr;
2559 hwaddr offset = invalidate_data->offset;
2560 unsigned size = invalidate_data->size;
2561 MemoryRegionSection section = memory_region_find(mr, offset, size);
2562
2563 qemu_mutex_lock_iothread();
2564
2565 /* Clear the dirty flag so this invalidation is not re-triggered later. */
2566 cpu_physical_memory_test_and_clear_dirty(offset, size, 1);
2567
2568 if (section.mr != mr) {
2569 /* memory_region_find takes a reference on section.mr */
2570 memory_region_unref(section.mr);
2571 if (MMIO_INTERFACE(section.mr->owner)) {
2572 /* We found the interface; just drop it. */
2573 object_property_set_bool(section.mr->owner, false, "realized",
2574 NULL);
2575 object_unref(section.mr->owner);
2576 object_unparent(section.mr->owner);
2577 }
2578 }
2579
2580 qemu_mutex_unlock_iothread();
2581
2582 if (invalidate_data->allocated) {
2583 g_free(invalidate_data);
2584 } else {
2585 invalidate_data->busy = 0;
2586 }
2587}
2588
2589void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
2590 unsigned size)
2591{
2592 size_t i;
2593 MMIOPtrInvalidate *invalidate_data = NULL;
2594
2595 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) {
2596 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) {
2597 invalidate_data = &mmio_ptr_invalidate_list[i];
2598 break;
2599 }
2600 }
2601
2602 if (!invalidate_data) {
2603 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate));
2604 invalidate_data->allocated = 1;
2605 }
2606
2607 invalidate_data->mr = mr;
2608 invalidate_data->offset = offset;
2609 invalidate_data->size = size;
2610
2611 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr,
2612 RUN_ON_CPU_HOST_PTR(invalidate_data));
2613}
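/*
 * Sketch (hypothetical handler and helper): a device that handed out a
 * pointer via request_ptr should invalidate it once the backing
 * contents become stale, for instance from its MMIO write handler:
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t val, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *
 *         update_window_contents(s, addr, val);   // hypothetical helper
 *         memory_region_invalidate_mmio_ptr(&s->mmio, 0,
 *                                           MYDEV_WINDOW_SIZE);
 *     }
 */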
2614
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002615void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002616{
Paolo Bonziniac951902015-02-11 15:21:04 +01002617 memory_region_ref(root);
Jan Kiszka59023ef2012-08-23 13:02:30 +02002618 memory_region_transaction_begin();
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002619 as->ref_count = 1;
Avi Kivity8786db72012-10-02 13:53:41 +02002620 as->root = root;
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002621 as->malloced = false;
Avi Kivity8786db72012-10-02 13:53:41 +02002622 as->current_map = g_new(FlatView, 1);
2623 flatview_init(as->current_map);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002624 as->ioeventfd_nb = 0;
2625 as->ioeventfds = NULL;
Paolo Bonzini9a546352016-09-22 16:23:06 +02002626 QTAILQ_INIT(&as->listeners);
Avi Kivity0d673e32012-10-02 15:28:50 +02002627 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002628 as->name = g_strdup(name ? name : "anonymous");
Avi Kivityac1970f2012-10-03 16:22:53 +02002629 address_space_init_dispatch(as);
Paolo Bonzinif43793c2013-04-16 15:39:51 +02002630 memory_region_update_pending |= root->enabled;
2631 memory_region_transaction_commit();
Avi Kivity1c0ffa52011-07-26 14:26:04 +03002632}
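/*
 * Typical usage (a minimal sketch; the names are illustrative):
 *
 *     MemoryRegion *root = g_new0(MemoryRegion, 1);
 *     AddressSpace *as = g_new0(AddressSpace, 1);
 *
 *     memory_region_init(root, NULL, "mydev-dma-root", UINT64_MAX);
 *     address_space_init(as, root, "mydev-dma");
 */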
Avi Kivity658b2222011-07-26 14:26:08 +03002633
Paolo Bonzini374f2982013-05-17 12:37:03 +02002634static void do_address_space_destroy(AddressSpace *as)
Avi Kivity83f3c252012-10-07 12:59:55 +02002635{
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002636 bool do_free = as->malloced;
David Gibson078c44f2014-05-30 12:59:00 -06002637
Avi Kivity83f3c252012-10-07 12:59:55 +02002638 address_space_destroy_dispatch(as);
Paolo Bonzini9a546352016-09-22 16:23:06 +02002639 assert(QTAILQ_EMPTY(&as->listeners));
David Gibson078c44f2014-05-30 12:59:00 -06002640
Paolo Bonzini856d7242013-05-06 11:57:21 +02002641 flatview_unref(as->current_map);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002642 g_free(as->name);
Avi Kivity4c19eb72012-10-30 13:47:44 +02002643 g_free(as->ioeventfds);
Paolo Bonziniac951902015-02-11 15:21:04 +01002644 memory_region_unref(as->root);
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002645 if (do_free) {
2646 g_free(as);
2647 }
2648}
2649
2650AddressSpace *address_space_init_shareable(MemoryRegion *root, const char *name)
2651{
2652 AddressSpace *as;
2653
2654 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2655 if (root == as->root && as->malloced) {
2656 as->ref_count++;
2657 return as;
2658 }
2659 }
2660
2661 as = g_malloc0(sizeof *as);
2662 address_space_init(as, root, name);
2663 as->malloced = true;
2664 return as;
Avi Kivity83f3c252012-10-07 12:59:55 +02002665}
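/*
 * Sketch (names illustrative): repeated calls with the same root return
 * the same shared AddressSpace with its reference count bumped; each
 * caller balances its reference with address_space_destroy().
 *
 *     AddressSpace *a = address_space_init_shareable(root, "cpu-memory");
 *     AddressSpace *b = address_space_init_shareable(root, "cpu-memory");
 *     assert(a == b);
 */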
2666
Paolo Bonzini374f2982013-05-17 12:37:03 +02002667void address_space_destroy(AddressSpace *as)
2668{
Paolo Bonziniac951902015-02-11 15:21:04 +01002669 MemoryRegion *root = as->root;
2670
Peter Crosthwaitef0c02d12016-01-21 14:15:06 +00002671 as->ref_count--;
2672 if (as->ref_count) {
2673 return;
2674 }
Paolo Bonzini374f2982013-05-17 12:37:03 +02002675 /* Flush out anything from MemoryListeners listening in on this address space */
2676 memory_region_transaction_begin();
2677 as->root = NULL;
2678 memory_region_transaction_commit();
2679 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002680 address_space_unregister(as);
Paolo Bonzini374f2982013-05-17 12:37:03 +02002681
2682 /* At this point, as->dispatch and as->current_map are dummy
2683 * entries that the guest should never use. Wait for the old
2684 * values to expire before freeing the data.
2685 */
Paolo Bonziniac951902015-02-11 15:21:04 +01002686 as->root = root;
Paolo Bonzini374f2982013-05-17 12:37:03 +02002687 call_rcu(as, do_address_space_destroy, rcu);
2688}
2689
Peter Xu4e831902017-01-16 16:40:04 +08002690static const char *memory_region_type(MemoryRegion *mr)
2691{
2692 if (memory_region_is_ram_device(mr)) {
2693 return "ramd";
2694 } else if (memory_region_is_romd(mr)) {
2695 return "romd";
2696 } else if (memory_region_is_rom(mr)) {
2697 return "rom";
2698 } else if (memory_region_is_ram(mr)) {
2699 return "ram";
2700 } else {
2701 return "i/o";
2702 }
2703}
2704
Blue Swirl314e2982011-09-11 20:22:05 +00002705typedef struct MemoryRegionList MemoryRegionList;
2706
2707struct MemoryRegionList {
2708 const MemoryRegion *mr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002709 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
Blue Swirl314e2982011-09-11 20:22:05 +00002710};
2711
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002712typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
Blue Swirl314e2982011-09-11 20:22:05 +00002713
Peter Xu4e831902017-01-16 16:40:04 +08002714#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2715 int128_sub((size), int128_one())) : 0)
2716#define MTREE_INDENT " "
2717
Blue Swirl314e2982011-09-11 20:22:05 +00002718static void mtree_print_mr(fprintf_function mon_printf, void *f,
2719 const MemoryRegion *mr, unsigned int level,
Avi Kivitya8170e52012-10-23 12:30:10 +02002720 hwaddr base,
Jan Kiszka9479c572011-09-27 15:00:41 +02002721 MemoryRegionListHead *alias_print_queue)
Blue Swirl314e2982011-09-11 20:22:05 +00002722{
Jan Kiszka9479c572011-09-27 15:00:41 +02002723 MemoryRegionList *new_ml, *ml, *next_ml;
2724 MemoryRegionListHead submr_print_queue;
Blue Swirl314e2982011-09-11 20:22:05 +00002725 const MemoryRegion *submr;
2726 unsigned int i;
Peter Xub31f8412017-03-14 20:56:27 +08002727 hwaddr cur_start, cur_end;
Blue Swirl314e2982011-09-11 20:22:05 +00002728
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002729 if (!mr) {
Blue Swirl314e2982011-09-11 20:22:05 +00002730 return;
2731 }
2732
2733 for (i = 0; i < level; i++) {
Peter Xu4e831902017-01-16 16:40:04 +08002734 mon_printf(f, MTREE_INDENT);
Blue Swirl314e2982011-09-11 20:22:05 +00002735 }
2736
Peter Xub31f8412017-03-14 20:56:27 +08002737 cur_start = base + mr->addr;
2738 cur_end = cur_start + MR_SIZE(mr->size);
2739
2740 /*
2741 * Try to detect overflow of the memory region. This should never
2742 * happen under normal conditions; when it does, print a marker to
2743 * warn the user observing the output.
2744 */
2745 if (cur_start < base || cur_end < cur_start) {
2746 mon_printf(f, "[DETECTED OVERFLOW!] ");
2747 }
2748
Blue Swirl314e2982011-09-11 20:22:05 +00002749 if (mr->alias) {
2750 MemoryRegionList *ml;
2751 bool found = false;
2752
2753 /* check if the alias is already in the queue */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002754 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
Paolo Bonzinif54bb152013-12-11 12:51:46 +01002755 if (ml->mr == mr->alias) {
Blue Swirl314e2982011-09-11 20:22:05 +00002756 found = true;
2757 }
2758 }
2759
2760 if (!found) {
2761 ml = g_new(MemoryRegionList, 1);
2762 ml->mr = mr->alias;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002763 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
Blue Swirl314e2982011-09-11 20:22:05 +00002764 }
Jan Kiszka4896d742012-02-04 16:25:42 +01002765 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
Peter Xu4e831902017-01-16 16:40:04 +08002766 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002767 "-" TARGET_FMT_plx "%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002768 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002769 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002770 memory_region_type((MemoryRegion *)mr),
Peter Crosthwaite3fb18b42014-08-14 23:55:36 -07002771 memory_region_name(mr),
2772 memory_region_name(mr->alias),
Blue Swirl314e2982011-09-11 20:22:05 +00002773 mr->alias_offset,
Peter Xu4e831902017-01-16 16:40:04 +08002774 mr->alias_offset + MR_SIZE(mr->size),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002775 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002776 } else {
Jan Kiszka4896d742012-02-04 16:25:42 +01002777 mon_printf(f,
Peter Xu4e831902017-01-16 16:40:04 +08002778 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n",
Peter Xub31f8412017-03-14 20:56:27 +08002779 cur_start, cur_end,
Jan Kiszka4b474ba2011-09-27 15:00:31 +02002780 mr->priority,
Peter Xu4e831902017-01-16 16:40:04 +08002781 memory_region_type((MemoryRegion *)mr),
Gerd Hoffmannf8a9f722015-04-08 12:57:11 +02002782 memory_region_name(mr),
2783 mr->enabled ? "" : " [disabled]");
Blue Swirl314e2982011-09-11 20:22:05 +00002784 }
Jan Kiszka9479c572011-09-27 15:00:41 +02002785
2786 QTAILQ_INIT(&submr_print_queue);
2787
Blue Swirl314e2982011-09-11 20:22:05 +00002788 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002789 new_ml = g_new(MemoryRegionList, 1);
2790 new_ml->mr = submr;
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002791 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002792 if (new_ml->mr->addr < ml->mr->addr ||
2793 (new_ml->mr->addr == ml->mr->addr &&
2794 new_ml->mr->priority > ml->mr->priority)) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002795 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002796 new_ml = NULL;
2797 break;
2798 }
2799 }
2800 if (new_ml) {
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002801 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
Jan Kiszka9479c572011-09-27 15:00:41 +02002802 }
2803 }
2804
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002805 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
Peter Xub31f8412017-03-14 20:56:27 +08002806 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
Jan Kiszka9479c572011-09-27 15:00:41 +02002807 alias_print_queue);
2808 }
2809
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002810 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
Jan Kiszka9479c572011-09-27 15:00:41 +02002811 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002812 }
2813}
2814
Peter Xu57bb40c2017-01-16 16:40:05 +08002815static void mtree_print_flatview(fprintf_function p, void *f,
2816 AddressSpace *as)
2817{
2818 FlatView *view = address_space_get_flatview(as);
2819 FlatRange *range = &view->ranges[0];
2820 MemoryRegion *mr;
2821 int n = view->nr;
2822
2823 if (n <= 0) {
2824 p(f, MTREE_INDENT "No rendered FlatView for "
2825 "address space '%s'\n", as->name);
2826 flatview_unref(view);
2827 return;
2828 }
2829
2830 while (n--) {
2831 mr = range->mr;
Paolo Bonzini377a07a2017-03-02 22:49:41 +01002832 if (range->offset_in_region) {
2833 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2834 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n",
2835 int128_get64(range->addr.start),
2836 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2837 mr->priority,
2838 range->readonly ? "rom" : memory_region_type(mr),
2839 memory_region_name(mr),
2840 range->offset_in_region);
2841 } else {
2842 p(f, MTREE_INDENT TARGET_FMT_plx "-"
2843 TARGET_FMT_plx " (prio %d, %s): %s\n",
2844 int128_get64(range->addr.start),
2845 int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
2846 mr->priority,
2847 range->readonly ? "rom" : memory_region_type(mr),
2848 memory_region_name(mr));
2849 }
Peter Xu57bb40c2017-01-16 16:40:05 +08002850 range++;
2851 }
2852
2853 flatview_unref(view);
2854}
2855
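/*
 * This is the backend of the HMP "info mtree" command; flatview=true
 * corresponds to "info mtree -f", which prints the rendered FlatView of
 * each address space instead of the memory region tree.
 */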
2856void mtree_info(fprintf_function mon_printf, void *f, bool flatview)
Blue Swirl314e2982011-09-11 20:22:05 +00002857{
2858 MemoryRegionListHead ml_head;
2859 MemoryRegionList *ml, *ml2;
Avi Kivity0d673e32012-10-02 15:28:50 +02002860 AddressSpace *as;
Blue Swirl314e2982011-09-11 20:22:05 +00002861
Peter Xu57bb40c2017-01-16 16:40:05 +08002862 if (flatview) {
2863 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2864 mon_printf(f, "address-space (flat view): %s\n", as->name);
2865 mtree_print_flatview(mon_printf, f, as);
2866 mon_printf(f, "\n");
2867 }
2868 return;
2869 }
2870
Blue Swirl314e2982011-09-11 20:22:05 +00002871 QTAILQ_INIT(&ml_head);
2872
Avi Kivity0d673e32012-10-02 15:28:50 +02002873 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02002874 mon_printf(f, "address-space: %s\n", as->name);
2875 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head);
2876 mon_printf(f, "\n");
Blue Swirlb9f9be82012-03-10 16:58:35 +00002877 }
2878
Blue Swirl314e2982011-09-11 20:22:05 +00002879 /* print aliased regions */
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002880 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
Gerd Hoffmanne48816a2015-04-08 12:53:47 +02002881 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
2882 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head);
2883 mon_printf(f, "\n");
Blue Swirl314e2982011-09-11 20:22:05 +00002884 }
2885
Kamil Rytarowskia16878d2017-09-03 18:33:04 +02002886 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
Avi Kivity88365e42011-11-13 12:00:55 +02002887 g_free(ml);
Blue Swirl314e2982011-09-11 20:22:05 +00002888 }
Blue Swirl314e2982011-09-11 20:22:05 +00002889}
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002890
Peter Maydellb08199c2017-07-07 15:42:51 +01002891void memory_region_init_ram(MemoryRegion *mr,
2892 struct Object *owner,
2893 const char *name,
2894 uint64_t size,
2895 Error **errp)
2896{
2897 DeviceState *owner_dev;
2898 Error *err = NULL;
2899
2900 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
2901 if (err) {
2902 error_propagate(errp, err);
2903 return;
2904 }
2905 /* This will assert if owner is neither NULL nor a DeviceState.
2906 * We only want the owner here for the purposes of defining a
2907 * unique name for migration. TODO: Ideally we should implement
2908 * a naming scheme for Objects which are not DeviceStates, in
2909 * which case we can relax this restriction.
2910 */
2911 owner_dev = DEVICE(owner);
2912 vmstate_register_ram(mr, owner_dev);
2913}
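/*
 * Usage sketch (names illustrative): migratable RAM owned by a device,
 * mapped into the system memory space.
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            s->ram_size, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), base_addr, &s->ram);
 */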
2914
2915void memory_region_init_rom(MemoryRegion *mr,
2916 struct Object *owner,
2917 const char *name,
2918 uint64_t size,
2919 Error **errp)
2920{
2921 DeviceState *owner_dev;
2922 Error *err = NULL;
2923
2924 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
2925 if (err) {
2926 error_propagate(errp, err);
2927 return;
2928 }
2929 /* This will assert if owner is neither NULL nor a DeviceState.
2930 * We only want the owner here for the purposes of defining a
2931 * unique name for migration. TODO: Ideally we should implement
2932 * a naming scheme for Objects which are not DeviceStates, in
2933 * which case we can relax this restriction.
2934 */
2935 owner_dev = DEVICE(owner);
2936 vmstate_register_ram(mr, owner_dev);
2937}
2938
2939void memory_region_init_rom_device(MemoryRegion *mr,
2940 struct Object *owner,
2941 const MemoryRegionOps *ops,
2942 void *opaque,
2943 const char *name,
2944 uint64_t size,
2945 Error **errp)
2946{
2947 DeviceState *owner_dev;
2948 Error *err = NULL;
2949
2950 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
2951 name, size, &err);
2952 if (err) {
2953 error_propagate(errp, err);
2954 return;
2955 }
2956 /* This will assert if owner is neither NULL nor a DeviceState.
2957 * We only want the owner here for the purposes of defining a
2958 * unique name for migration. TODO: Ideally we should implement
2959 * a naming scheme for Objects which are not DeviceStates, in
2960 * which case we can relax this restriction.
2961 */
2962 owner_dev = DEVICE(owner);
2963 vmstate_register_ram(mr, owner_dev);
2964}
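/*
 * Usage sketch (hypothetical ops and state): a flash-style region whose
 * reads are serviced from RAM while writes trap to the device model.
 *
 *     static const MemoryRegionOps flash_ops = {
 *         .read = flash_read,              // used when ROMD mode is off
 *         .write = flash_write,            // hypothetical write handler
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_rom_device(&s->flash, OBJECT(dev), &flash_ops, s,
 *                                   "mydev.flash", s->flash_size,
 *                                   &error_fatal);
 */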
2965
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002966static const TypeInfo memory_region_info = {
2967 .parent = TYPE_OBJECT,
2968 .name = TYPE_MEMORY_REGION,
2969 .instance_size = sizeof(MemoryRegion),
2970 .instance_init = memory_region_initfn,
2971 .instance_finalize = memory_region_finalize,
2972};
2973
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002974static const TypeInfo iommu_memory_region_info = {
2975 .parent = TYPE_MEMORY_REGION,
2976 .name = TYPE_IOMMU_MEMORY_REGION,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10002977 .class_size = sizeof(IOMMUMemoryRegionClass),
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002978 .instance_size = sizeof(IOMMUMemoryRegion),
2979 .instance_init = iommu_memory_region_initfn,
Alexey Kardashevskiy1221a472017-07-11 13:56:20 +10002980 .abstract = true,
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002981};
2982
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002983static void memory_register_types(void)
2984{
2985 type_register_static(&memory_region_info);
Alexey Kardashevskiy3df9d742017-07-11 13:56:19 +10002986 type_register_static(&iommu_memory_region_info);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002987}
2988
2989type_init(memory_register_types)